/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"log"
"net/http"
"os"
"strconv"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun"
"github.com/tektoncd/pipeline/pkg/reconciler/resolutionrequest"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
"k8s.io/utils/clock"
filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/signals"
)
const (
	// ControllerLogKey is the name of the logger for the controller cmd
	ControllerLogKey = "tekton-pipelines-controller"
)

// main wires up and runs the Tekton pipelines controller: it tunes client-go
// rate limits, registers command-line flags, serves liveness/readiness
// probes, and starts the taskrun, pipelinerun and resolutionrequest
// reconcilers through knative's sharedmain.
func main() {
	// Allow THREADS_PER_CONTROLLER to override the knative default before
	// the flag below is registered, so the env value becomes the flag's
	// default and an explicit -threads-per-controller still wins.
	if val, ok := os.LookupEnv("THREADS_PER_CONTROLLER"); ok {
		threadsPerController, err := strconv.Atoi(val)
		if err != nil {
			log.Fatalf("failed to parse value %q of THREADS_PER_CONTROLLER: %v\n", val, err)
		}
		controller.DefaultThreadsPerController = threadsPerController
	}
	flag.IntVar(&controller.DefaultThreadsPerController, "threads-per-controller", controller.DefaultThreadsPerController, "Threads (goroutines) to create per controller")
	namespace := flag.String("namespace", corev1.NamespaceAll, "Namespace to restrict informer to. Optional, defaults to all namespaces.")
	disableHighAvailability := flag.Bool("disable-ha", false, "Whether to disable high-availability functionality for this component. This flag will be deprecated "+
		"and removed when we have promoted this feature to stable, so do not pass it without filing an "+
		"issue upstream!")

	// Images used by the controller when constructing step containers; all
	// default empty and are validated together below.
	opts := &pipeline.Options{}
	flag.StringVar(&opts.Images.EntrypointImage, "entrypoint-image", "", "The container image containing our entrypoint binary.")
	flag.StringVar(&opts.Images.SidecarLogResultsImage, "sidecarlogresults-image", "", "The container image containing the binary for accessing results.")
	flag.StringVar(&opts.Images.NopImage, "nop-image", "", "The container image used to stop sidecars")
	flag.StringVar(&opts.Images.ShellImage, "shell-image", "", "The container image containing a shell")
	flag.StringVar(&opts.Images.ShellImageWin, "shell-image-win", "", "The container image containing a windows shell")
	flag.StringVar(&opts.Images.WorkingDirInitImage, "workingdirinit-image", "", "The container image containing our working dir init binary.")
	flag.DurationVar(&opts.ResyncPeriod, "resync-period", controller.DefaultResyncPeriod, "The period between two resync run (going through all objects)")

	// This parses flags.
	cfg := injection.ParseAndGetRESTConfigOrDie()

	if err := opts.Images.Validate(); err != nil {
		log.Fatal(err)
	}
	// When no QPS/Burst came from the kubeconfig, start from doubled
	// client-go defaults before the multiplier below.
	if cfg.QPS == 0 {
		cfg.QPS = 2 * rest.DefaultQPS
	}
	if cfg.Burst == 0 {
		cfg.Burst = rest.DefaultBurst
	}
	// FIXME(vdemeester): this is here to not break current behavior
	// multiply by 2, no of controllers being created
	cfg.QPS = 2 * cfg.QPS
	cfg.Burst = 2 * cfg.Burst

	ctx := injection.WithNamespaceScope(signals.NewContext(), *namespace)
	if *disableHighAvailability {
		ctx = sharedmain.WithHADisabled(ctx)
	}

	// sets up liveness and readiness probes.
	mux := http.NewServeMux()
	mux.HandleFunc("/", handler)
	mux.HandleFunc("/health", handler)
	mux.HandleFunc("/readiness", handler)

	port := os.Getenv("PROBES_PORT")
	if port == "" {
		port = "8080"
	}
	// Serve the probe endpoints for the lifetime of the process; failing to
	// bind is fatal since the pod's probes would never succeed.
	go func() {
		// start the web server on port and accept requests
		log.Printf("Readiness and health check server listening on port %s", port)
		log.Fatal(http.ListenAndServe(":"+port, mux)) // #nosec G114 -- see https://github.com/securego/gosec#available-rules
	}()

	// Scope informers to objects carrying the Tekton managed-by label and
	// propagate the configured resync period to all controllers.
	ctx = filteredinformerfactory.WithSelectors(ctx, v1beta1.ManagedByLabelKey)
	ctx = controller.WithResyncPeriod(ctx, opts.ResyncPeriod)
	sharedmain.MainWithConfig(ctx, ControllerLogKey, cfg,
		taskrun.NewController(opts, clock.RealClock{}),
		pipelinerun.NewController(opts, clock.RealClock{}),
		resolutionrequest.NewController(clock.RealClock{}),
	)
}
// handler is the shared probe endpoint: it answers every request with
// 200 OK, so liveness and readiness checks succeed while the process is up.
func handler(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// extractArgs splits initialArgs on the first `--` terminator. It returns the
// arguments before the terminator (to be parsed as entrypoint flags) and the
// arguments after it (the command the entrypoint should execute). When no
// terminator is present, all arguments are returned as flag arguments and the
// command slice is empty.
func extractArgs(initialArgs []string) ([]string, []string) {
	commandArgs := []string{}
	args := initialArgs
	if len(initialArgs) == 0 {
		return args, commandArgs
	}
	// Detect if `--` is present; if it is, parse only what comes before it.
	terminatorIndex := -1
	for i, a := range initialArgs {
		if a == "--" {
			terminatorIndex = i
			break
		}
	}
	// A terminator at index 0 (`entrypoint -- cmd ...`) is valid too:
	// everything after it is the command and there are no flag args.
	// The previous `> 0` check silently left the `--` inside the flag args
	// in that case.
	if terminatorIndex >= 0 {
		commandArgs = initialArgs[terminatorIndex+1:]
		args = initialArgs[:terminatorIndex]
	}
	return args, commandArgs
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"os"
"os/exec"
"strings"
"syscall"
"time"
"github.com/tektoncd/pipeline/cmd/entrypoint/subcommands"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/types"
"github.com/tektoncd/pipeline/pkg/credentials/dockercreds"
"github.com/tektoncd/pipeline/pkg/credentials/gitcreds"
credwriter "github.com/tektoncd/pipeline/pkg/credentials/writer"
"github.com/tektoncd/pipeline/pkg/entrypoint"
"github.com/tektoncd/pipeline/pkg/platforms"
"github.com/tektoncd/pipeline/pkg/termination"
)
// Command-line flags controlling the entrypoint binary's behavior; their
// values are fed into the entrypoint.Entrypointer built in main.
var (
	ep                  = flag.String("entrypoint", "", "Original specified entrypoint to execute")
	waitFiles           = flag.String("wait_file", "", "Comma-separated list of paths to wait for")
	waitFileContent     = flag.Bool("wait_file_content", false, "If specified, expect wait_file to have content")
	postFile            = flag.String("post_file", "", "If specified, file to write upon completion")
	terminationPath     = flag.String("termination_path", "/tekton/termination", "If specified, file to write upon termination")
	results             = flag.String("results", "", "If specified, list of file names that might contain task results")
	stepResults         = flag.String("step_results", "", "step results if specified")
	whenExpressions     = flag.String("when_expressions", "", "when expressions if specified")
	timeout             = flag.Duration("timeout", time.Duration(0), "If specified, sets timeout for step")
	stdoutPath          = flag.String("stdout_path", "", "If specified, file to copy stdout to")
	stderrPath          = flag.String("stderr_path", "", "If specified, file to copy stderr to")
	breakpointOnFailure = flag.Bool("breakpoint_on_failure", false, "If specified, expect steps to not skip on failure")
	debugBeforeStep     = flag.Bool("debug_before_step", false, "If specified, wait for a debugger to attach before executing the step")
	onError             = flag.String("on_error", "", "Set to \"continue\" to ignore an error and continue when a container terminates with a non-zero exit code."+
		" Set to \"stopAndFail\" to declare a failure with a step error and stop executing the rest of the steps.")
	stepMetadataDir        = flag.String("step_metadata_dir", "", "If specified, create directory to store the step metadata e.g. /tekton/steps/<step-name>/")
	resultExtractionMethod = flag.String("result_from", entrypoint.ResultExtractionMethodTerminationMessage, "The method using which to extract results from tasks. Default is using the termination message.")
)

const (
	// defaultWaitPollingInterval is the polling interval handed to the file
	// waiter constructed in main.
	defaultWaitPollingInterval = time.Second
	// TektonPlatformCommandsEnv names the env var holding a JSON map of
	// platform string -> command, consulted when no -entrypoint flag is set.
	TektonPlatformCommandsEnv = "TEKTON_PLATFORM_COMMANDS"
)
// main is the step entrypoint: it parses flags, optionally runs a subcommand
// (init/cp/decode-script/step-init), prepares credentials, resolves the
// command to execute, then delegates to entrypoint.Entrypointer and maps the
// resulting error onto an exit code.
func main() {
	// Add credential flags originally introduced with our legacy credentials helper
	// image (creds-init).
	gitcreds.AddFlags(flag.CommandLine)
	dockercreds.AddFlags(flag.CommandLine)

	// Split args with `--` for the entrypoint and what it should execute
	args, commandArgs := extractArgs(os.Args[1:])

	// We are using the global variable flag.CommandLine here to be able
	// to define what args it should parse.
	// flag.Parse() does flag.CommandLine.Parse(os.Args[1:])
	if err := flag.CommandLine.Parse(args); err != nil {
		os.Exit(1)
	}
	// Remaining non-flag args may name a subcommand. subcommands.OK is a
	// sentinel "success" error, so it exits 0 while real failures exit 1.
	if err := subcommands.Process(flag.CommandLine.Args()); err != nil {
		log.Println(err.Error())
		var ok subcommands.OK
		if errors.As(err, &ok) {
			return
		}
		os.Exit(1)
	}

	// Copy credentials we're expecting from the legacy credentials helper (creds-init)
	// from secret volume mounts to /tekton/creds. This is done to support the expansion
	// of a variable, $(credentials.path), that resolves to a single place with all the
	// stored credentials.
	builders := []credwriter.Writer{dockercreds.NewBuilder(), gitcreds.NewBuilder()}
	for _, c := range builders {
		// Deliberately non-fatal: a step may not need these credentials.
		if err := c.Write(entrypoint.CredsDir); err != nil {
			log.Printf("Error initializing credentials: %s", err)
		}
	}

	// Resolve the command to run: either the -entrypoint flag, or a
	// per-platform lookup in the JSON map from TEKTON_PLATFORM_COMMANDS.
	var cmd []string
	if *ep != "" {
		cmd = []string{*ep}
	} else {
		env := os.Getenv(TektonPlatformCommandsEnv)
		var cmds map[string][]string
		if err := json.Unmarshal([]byte(env), &cmds); err != nil {
			log.Fatal(err)
		}
		// NB: This value contains OS/architecture and maybe variant.
		// It doesn't include osversion, which is necessary to
		// disambiguate two images both for e.g., Windows, that only
		// differ by osversion.
		plat := platforms.NewPlatform().Format()
		var err error
		cmd, err = selectCommandForPlatform(cmds, plat)
		if err != nil {
			log.Fatal(err)
		}
	}

	// Optional JSON-encoded when-expressions for this step.
	var when v1.StepWhenExpressions
	if len(*whenExpressions) > 0 {
		if err := json.Unmarshal([]byte(*whenExpressions), &when); err != nil {
			log.Fatal(err)
		}
	}

	// nil when SPIRE is disabled (build tag or flags); Entrypointer treats
	// a nil client as "no SPIRE".
	spireWorkloadAPI := initializeSpireAPI()

	e := entrypoint.Entrypointer{
		Command:         append(cmd, commandArgs...),
		WaitFiles:       strings.Split(*waitFiles, ","),
		WaitFileContent: *waitFileContent,
		PostFile:        *postFile,
		TerminationPath: *terminationPath,
		Waiter:          &realWaiter{waitPollingInterval: defaultWaitPollingInterval, breakpointOnFailure: *breakpointOnFailure},
		Runner: &realRunner{
			stdoutPath: *stdoutPath,
			stderrPath: *stderrPath,
		},
		PostWriter:             &realPostWriter{},
		Results:                strings.Split(*results, ","),
		StepResults:            strings.Split(*stepResults, ","),
		Timeout:                timeout,
		StepWhenExpressions:    when,
		BreakpointOnFailure:    *breakpointOnFailure,
		DebugBeforeStep:        *debugBeforeStep,
		OnError:                *onError,
		StepMetadataDir:        *stepMetadataDir,
		SpireWorkloadAPI:       spireWorkloadAPI,
		ResultExtractionMethod: *resultExtractionMethod,
	}

	// Copy any creds injected by the controller into the $HOME directory of the current
	// user so that they're discoverable by git / ssh.
	if err := credwriter.CopyCredsToHome(credwriter.CredsInitCredentials); err != nil {
		log.Printf("non-fatal error copying credentials: %q", err)
	}

	// Run the step and translate each failure mode into the exit status the
	// controller expects.
	if err := e.Go(); err != nil {
		switch t := err.(type) { //nolint:errorlint // checking for multiple types with errors.As is ugly.
		case entrypoint.DebugBeforeStepError:
			log.Println("Skipping execute step script because before step breakpoint fail-continue")
			os.Exit(1)
		case entrypoint.SkipError:
			log.Print("Skipping step because a previous step failed")
			os.Exit(1)
		case termination.MessageLengthError:
			log.Print(err.Error())
			os.Exit(1)
		case entrypoint.ContextError:
			if entrypoint.IsContextCanceledError(err) {
				log.Print("Step was cancelled")
				// use the SIGKILL signal to distinguish normal exit programs, just like kill -9 PID
				os.Exit(int(syscall.SIGKILL))
			} else {
				log.Print(err.Error())
				os.Exit(1)
			}
		case *exec.ExitError:
			// Copied from https://stackoverflow.com/questions/10385551/get-exit-code-go
			// This works on both Unix and Windows. Although
			// package syscall is generally platform dependent,
			// WaitStatus is defined for both Unix and Windows and
			// in both cases has an ExitStatus() method with the
			// same signature.
			if status, ok := t.Sys().(syscall.WaitStatus); ok {
				e.CheckForBreakpointOnFailure()
				// ignore a step error i.e. do not exit if a container terminates with a non-zero exit code when onError is set to "continue"
				if e.OnError != entrypoint.ContinueOnError {
					os.Exit(status.ExitStatus())
				}
			}
			// log and exit only if a step error must cause run failure
			if e.OnError != entrypoint.ContinueOnError {
				log.Fatalf("Error executing command (ExitError): %v", err)
			}
		default:
			e.CheckForBreakpointOnFailure()
			log.Fatalf("Error executing command: %v", err)
		}
	}
}
// selectCommandForPlatform returns the command for the given platform string
// (e.g. "linux/arm64/v8") from cmds. It first looks for an exact match, then
// falls back to the same platform with the trailing CPU-variant segment
// stripped. An error is returned when no command is defined for the platform.
func selectCommandForPlatform(cmds map[string][]string, plat string) ([]string, error) {
	if cmd, found := cmds[plat]; found {
		return cmd, nil
	}
	// If the command wasn't found, check if there's a
	// command defined for the same platform without a CPU
	// variant specified. Guard against a platform string that contains no
	// "/" at all: the unguarded plat[:strings.LastIndex(plat, "/")] would
	// panic with a negative slice index in that case.
	if i := strings.LastIndex(plat, "/"); i >= 0 {
		if cmd, found := cmds[plat[:i]]; found {
			return cmd, nil
		}
	}
	return nil, fmt.Errorf("could not find command for platform %q", plat)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"math"
"os/exec"
"syscall"
)
// We need the max value of an unsigned 32 bit integer (4294967295), but we also need this number
// to fit into an "int". On some systems this is 32 bits, so the max uint32 can't fit into here.
// maxIntForArch is the higher of those two values.
func maxIntForArch() int {
	// Deliberately go through a runtime variable: a typed-constant
	// conversion such as int(uint32(math.MaxUint32)) is evaluated (and
	// range-checked) at compile time, so it would fail to build on 32-bit
	// platforms where it overflows.
	allOnes := uint32(math.MaxUint32)
	asInt := int(allOnes)
	if asInt > math.MaxInt32 {
		// 64-bit int: the full uint32 range fits.
		return asInt
	}
	// 32-bit int: the conversion wrapped around, so MaxInt32 is the best we can offer.
	return math.MaxInt32
}
// dropNetworking modifies the supplied exec.Cmd to execute in a net set of namespaces that do not
// have network access.
// NOTE(review): SysProcAttr.Cloneflags/UidMappings are Linux-only fields, so
// this file presumably builds only on Linux — confirm the build constraints.
func dropNetworking(cmd *exec.Cmd) {
	// These flags control the behavior of the new process.
	// Documentation for these is available here: https://man7.org/linux/man-pages/man2/clone.2.html
	// We mostly want to just create a new network namespace, unattached to any networking devices.
	// The other flags are necessary for that to work.
	if cmd.SysProcAttr == nil {
		// We build this up piecemeal in case it was already set, to avoid overwriting anything.
		cmd.SysProcAttr = &syscall.SysProcAttr{}
	}

	cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNS |
		syscall.CLONE_NEWPID | // NEWPID creates a new process namespace
		syscall.CLONE_NEWNET | // NEWNET creates a new network namespace (this is the one we really care about)
		syscall.CLONE_NEWUSER // NEWUSER creates a new user namespace

	// We need to map the existing user IDs into the new namespace.
	// Just map everything.
	cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
		{
			ContainerID: 0,
			HostID:      0,
			// Map all users
			Size: maxIntForArch(),
		},
	}

	// This is needed to allow programs to call setgroups when in a new Gid namespace.
	// Things like apt-get install require this to work.
	cmd.SysProcAttr.GidMappingsEnableSetgroups = true
	// We need to map the existing group IDs into the new namespace.
	// Just map everything.
	cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
		{
			ContainerID: 0,
			HostID:      0,
			// Map all groups
			Size: maxIntForArch(),
		},
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
"os"
"path/filepath"
"github.com/tektoncd/pipeline/pkg/entrypoint"
)
// realPostWriter actually writes files.
type realPostWriter struct{}

var _ entrypoint.PostWriter = (*realPostWriter)(nil)

// Write creates the named file (and any missing parent directories) and, when
// content is non-empty, writes content into it. An empty file name is a no-op.
// Failures are fatal: without its post/termination files the entrypoint
// cannot make progress.
func (*realPostWriter) Write(file string, content string) {
	if file == "" {
		return
	}
	// Create directory if it doesn't already exist
	if err := os.MkdirAll(filepath.Dir(file), os.ModePerm); err != nil {
		log.Fatalf("Error creating parent directory of %q: %v", file, err)
	}
	f, err := os.Create(file)
	if err != nil {
		log.Fatalf("Creating %q: %v", file, err)
	}
	// make sure that the file is closed at the end
	defer f.Close()
	if content == "" {
		return
	}
	if _, err := f.WriteString(content); err != nil {
		log.Fatalf("Writing %q: %v", file, err)
	}
}
//go:build !windows
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/signal"
"path/filepath"
"sync"
"syscall"
"github.com/tektoncd/pipeline/pkg/entrypoint"
)
const (
	TektonHermeticEnvVar = "TEKTON_HERMETIC"
)

// TODO(jasonhall): Test that original exit code is propagated and that
// stdout/stderr are collected -- needs e2e tests.

// realRunner actually runs commands.
type realRunner struct {
	sync.Mutex
	signals       chan os.Signal // receives system signals to forward to the child
	signalsClosed bool           // guards against double-close of signals
	stdoutPath    string         // optional file to tee the child's stdout into
	stderrPath    string         // optional file to tee the child's stderr into
}

var _ entrypoint.Runner = (*realRunner)(nil)

// close closes the signals channel which is used to receive system signals.
// It is safe to call repeatedly; only the first call closes the channel.
func (rr *realRunner) close() {
	rr.Lock()
	defer rr.Unlock()
	if rr.signals == nil || rr.signalsClosed {
		return
	}
	close(rr.signals)
	rr.signalsClosed = true
}

// signal allows the caller to simulate the sending of a system signal.
// It is a no-op when the channel is absent or already closed.
func (rr *realRunner) signal(sig os.Signal) {
	rr.Lock()
	defer rr.Unlock()
	if rr.signals == nil || rr.signalsClosed {
		return
	}
	rr.signals <- sig
}
// Run executes the entrypoint.
// It runs args[0] with the remaining args, mirroring the child's
// stdout/stderr (optionally teeing them into the configured log files),
// forwards received OS signals to the child's process group, and maps
// context cancellation/deadline onto the entrypoint sentinel errors.
func (rr *realRunner) Run(ctx context.Context, args ...string) error {
	if len(args) == 0 {
		return nil
	}
	name, args := args[0], args[1:]

	// Receive system signals on "rr.signals"
	if rr.signals == nil {
		rr.signals = make(chan os.Signal, 1)
	}
	defer rr.close()
	// Subscribe to all signals; the forwarding goroutine below filters out SIGCHLD.
	signal.Notify(rr.signals)
	defer signal.Reset()

	cmd := exec.CommandContext(ctx, name, args...)

	// if a standard output file is specified
	// create the log file and add to the std multi writer
	if rr.stdoutPath != "" {
		stdout, err := newStdLogWriter(rr.stdoutPath)
		if err != nil {
			return err
		}
		defer stdout.Close()
		cmd.Stdout = io.MultiWriter(os.Stdout, stdout)
	} else {
		cmd.Stdout = os.Stdout
	}
	if rr.stderrPath != "" {
		stderr, err := newStdLogWriter(rr.stderrPath)
		if err != nil {
			return err
		}
		defer stderr.Close()
		cmd.Stderr = io.MultiWriter(os.Stderr, stderr)
	} else {
		cmd.Stderr = os.Stderr
	}

	// dedicated PID group used to forward signals to
	// main process and all children
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

	// Hermetic mode: isolate the child from the network. Both env vars are
	// consulted; TEKTON_RESOURCE_NAME must be unset for this to kick in.
	if os.Getenv("TEKTON_RESOURCE_NAME") == "" && os.Getenv(TektonHermeticEnvVar) == "1" {
		dropNetworking(cmd)
	}

	// Start defined command
	if err := cmd.Start(); err != nil {
		// Prefer reporting a context deadline/cancel over the raw start error.
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return entrypoint.ErrContextDeadlineExceeded
		}
		if errors.Is(ctx.Err(), context.Canceled) {
			return entrypoint.ErrContextCanceled
		}
		return err
	}

	// Goroutine for signals forwarding; it exits when rr.close() (deferred
	// above) closes the channel.
	go func() {
		for s := range rr.signals {
			// Forward signal to main process and all children
			if s != syscall.SIGCHLD {
				_ = syscall.Kill(-cmd.Process.Pid, s.(syscall.Signal))
			}
		}
	}()

	// Wait for command to exit
	// as os.exec [note](https://github.com/golang/go/blob/ee522e2cdad04a43bc9374776483b6249eb97ec9/src/os/exec/exec.go#L897-L906)
	// cmd.Wait prefer Process error over context error
	// but we want to return context error instead
	if err := cmd.Wait(); err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return entrypoint.ErrContextDeadlineExceeded
		}
		if errors.Is(ctx.Err(), context.Canceled) {
			return entrypoint.ErrContextCanceled
		}
		return err
	}
	return nil
}
// newStdLogWriter opens (creating if necessary) a file used to collect a std
// stream. The file is opened with os.O_WRONLY|os.O_CREATE|os.O_APPEND, so it
// will not override any existing content in the path; the same file can
// therefore back multiple streams if desired. Callers must close the returned
// file after use.
func newStdLogWriter(path string) (*os.File, error) {
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return nil, fmt.Errorf("error creating parent directory: %w", err)
	}
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return nil, fmt.Errorf("error opening %s: %w", path, err)
	}
	return file, nil
}
//go:build !disable_spire
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"log"
"github.com/tektoncd/pipeline/pkg/spire"
"github.com/tektoncd/pipeline/pkg/spire/config"
)
var (
	enableSpire = flag.Bool("enable_spire", false, "If specified by configmap, this enables spire signing and verification")
	socketPath  = flag.String("spire_socket_path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.")
)

// initializeSpireAPI returns a SPIRE-backed entrypointer API client when
// SPIRE is enabled and a workload-API socket path is configured; otherwise it
// returns nil, which disables SPIRE signing and verification.
func initializeSpireAPI() spire.EntrypointerAPIClient {
	if enableSpire == nil || !*enableSpire {
		return nil
	}
	if socketPath == nil || *socketPath == "" {
		return nil
	}
	log.Println("SPIRE is enabled in this build, enableSpire is supported")
	spireConfig := config.SpireConfig{SocketPath: *socketPath}
	return spire.NewEntrypointerAPIClient(&spireConfig)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subcommands
import (
"io"
"os"
)
// CopyCommand is the name of the copy command.
const CopyCommand = "cp"

// Owner has permission to write and execute, and anybody has
// permission to execute.
const dstPermissions = 0311

// cp copies a file from src to dst, truncating any existing content at dst.
// The destination is created with dstPermissions when it does not already
// exist. Close errors on the destination are surfaced, since for a freshly
// written file they can indicate lost writes.
func cp(src, dst string) error {
	s, err := os.Open(src)
	if err != nil {
		return err
	}
	defer s.Close()

	// O_TRUNC matters: without it, copying a shorter file over an existing
	// longer one would leave stale trailing bytes in dst.
	d, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, dstPermissions)
	if err != nil {
		return err
	}
	if _, err := io.Copy(d, s); err != nil {
		d.Close()
		return err
	}
	return d.Close()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subcommands
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"os"
)
// DecodeScriptCommand is the command name for decoding scripts.
const DecodeScriptCommand = "decode-script"

// decodeScript rewrites a script file from base64 back into its original
// content from the Step definition, preserving the file's permission bits.
func decodeScript(scriptPath string) error {
	decodedBytes, permissions, err := decodeScriptFromFile(scriptPath)
	if err != nil {
		return fmt.Errorf("error decoding script file %q: %w", scriptPath, err)
	}
	if err := os.WriteFile(scriptPath, decodedBytes, permissions); err != nil {
		return fmt.Errorf("error writing decoded script file %q: %w", scriptPath, err)
	}
	return nil
}

// decodeScriptFromFile reads the script at scriptPath, decodes it from
// base64, and returns the decoded bytes w/ the permissions to use when re-writing
// or an error.
func decodeScriptFromFile(scriptPath string) ([]byte, os.FileMode, error) {
	scriptFile, err := os.Open(scriptPath)
	if err != nil {
		return nil, 0, fmt.Errorf("error reading from script file %q: %w", scriptPath, err)
	}
	defer scriptFile.Close()

	var encoded bytes.Buffer
	if _, err := io.Copy(&encoded, scriptFile); err != nil {
		return nil, 0, fmt.Errorf("error reading from script file %q: %w", scriptPath, err)
	}
	// Preserve the original permission bits for the rewrite.
	info, err := scriptFile.Stat()
	if err != nil {
		return nil, 0, fmt.Errorf("error statting script file %q: %w", scriptPath, err)
	}
	decoded := make([]byte, base64.StdEncoding.DecodedLen(encoded.Len()))
	n, err := base64.StdEncoding.Decode(decoded, encoded.Bytes())
	if err != nil {
		return nil, 0, fmt.Errorf("error decoding script file %q: %w", scriptPath, err)
	}
	return decoded[:n], info.Mode().Perm(), nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subcommands
// InitCommand is the name of main initialization command
const InitCommand = "init"

// entrypointInit copies the entrypoint binary from src to dst (see cp) and
// then sets up the /tekton/steps directory for the pod (see stepInit).
// steps is the list of step names, in order matching the Task spec; it is
// forwarded unchanged to stepInit. The first error encountered is returned.
func entrypointInit(src, dst string, steps []string) error {
	if err := cp(src, dst); err != nil {
		return err
	}
	return stepInit(steps)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subcommands
import (
"log"
"os"
"path/filepath"
"strconv"
)
// StepInitCommand is the name of the /tekton/steps initialization command.
const StepInitCommand = "step-init"

var (
	// tektonRoot is the location of the Tekton root directory.
	// Included as a global variable to allow overriding for tests.
	tektonRoot = "/tekton"
)

// stepInit sets up the /tekton/steps directory for the pod.
// This expects the list of steps (in order matching the Task spec).
//
// Step data is written to a /tekton/run/<step>/status folder corresponding to
// each step - only mounted RW for the matching user step (and RO for all other
// steps). /tekton/steps provides convenience symlinks so Tekton utilities can
// reference steps by name or index.
// NOTE: /tekton/steps may be removed in the future. Prefer using /tekton/run
// directly if possible.
func stepInit(steps []string) error {
	// Create the steps directory if it doesn't already exist; failure here
	// is fatal because nothing else can proceed without it.
	stepDir := filepath.Join(tektonRoot, "steps")
	if err := os.MkdirAll(stepDir, os.ModePerm); err != nil {
		log.Fatalf("Error creating steps directory %q: %v", stepDir, err)
	}

	// For each step, link both its name and its index to the per-step
	// status folder under /tekton/run.
	for index, name := range steps {
		target := filepath.Join(tektonRoot, "run", strconv.Itoa(index), "status")
		for _, link := range []string{name, strconv.Itoa(index)} {
			if err := os.Symlink(target, filepath.Join(stepDir, link)); err != nil {
				return err
			}
		}
	}
	return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subcommands
import (
"fmt"
)
// OK is returned for successful subcommand executions.
type OK struct {
	message string
}

// Error returns the success message; OK implements error so subcommand
// results can travel the same return path as failures.
func (o OK) Error() string {
	return o.message
}

// Compile-time check that OK is an error type.
var _ error = OK{}
// SubcommandError is returned for failed subcommand executions.
type SubcommandError struct {
	subcommand string // which subcommand failed
	message    string // the underlying failure text
}

// Error formats the failure as "<subcommand> error: <message>".
func (e SubcommandError) Error() string {
	return e.subcommand + " error: " + e.message
}
// Process takes the set of arguments passed to entrypoint and executes any
// subcommand that the args call for. An error is returned to the caller to
// indicate that a subcommand was matched and to pass back its success/fail
// state. The returned error will be nil if no subcommand was matched to the
// passed args, OK if args matched and the subcommand
// succeeded, or any other error if the args matched but the subcommand failed.
func Process(args []string) error {
	if len(args) == 0 {
		return nil
	}
	subcommand, rest := args[0], args[1:]
	switch subcommand {
	case InitCommand:
		// If invoked in "init mode" (`entrypoint init <src> <dst> [<step-name>]`),
		// it will copy the src path to the dst path (like CopyCommand), and initialize
		// the /tekton/steps folder (like StepInitCommand)
		if len(rest) < 2 {
			break // wrong arity: fall through, not a subcommand invocation
		}
		if err := entrypointInit(rest[0], rest[1], rest[2:]); err != nil {
			return SubcommandError{subcommand: InitCommand, message: err.Error()}
		}
		return OK{message: "Entrypoint initialization"}
	case CopyCommand:
		// If invoked in "cp mode" (`entrypoint cp <src> <dst>`), simply copy
		// the src path to the dst path. This is used to place the entrypoint
		// binary in the tools directory, without requiring the cp command to
		// exist in the base image.
		if len(rest) != 2 {
			break
		}
		if err := cp(rest[0], rest[1]); err != nil {
			return SubcommandError{subcommand: CopyCommand, message: err.Error()}
		}
		return OK{message: fmt.Sprintf("Copied %s to %s", rest[0], rest[1])}
	case DecodeScriptCommand:
		// If invoked in "decode-script" mode (`entrypoint decode-script <src>`),
		// read the script at <src> and overwrite it with its decoded content.
		if len(rest) != 1 {
			break
		}
		if err := decodeScript(rest[0]); err != nil {
			return SubcommandError{subcommand: DecodeScriptCommand, message: err.Error()}
		}
		return OK{message: "Decoded script " + rest[0]}
	case StepInitCommand:
		if err := stepInit(rest); err != nil {
			return SubcommandError{subcommand: StepInitCommand, message: err.Error()}
		}
		return OK{message: "Setup /step directories"}
	}
	return nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"errors"
"fmt"
"os"
"time"
"github.com/tektoncd/pipeline/pkg/entrypoint"
)
// realWaiter actually waits for files, by polling.
type realWaiter struct {
	// waitPollingInterval is the delay between successive os.Stat polls in Wait.
	waitPollingInterval time.Duration
	// breakpointOnFailure mirrors the taskrun's breakpoint-on-failure debug
	// setting. NOTE(review): Wait receives breakpointOnFailure as a parameter
	// and does not read this field in the visible code — confirm the field is
	// consumed elsewhere.
	breakpointOnFailure bool
}

// Compile-time check that realWaiter implements entrypoint.Waiter.
var _ entrypoint.Waiter = (*realWaiter)(nil)
// setWaitPollingInterval configures the delay between file polls and returns
// the receiver so the call can be chained.
func (rw *realWaiter) setWaitPollingInterval(interval time.Duration) *realWaiter {
	rw.waitPollingInterval = interval
	return rw
}
// Wait watches a file and returns when either a) the file exists and, if
// the expectContent argument is true, the file has non-zero size or b) there
// is an error polling the file.
//
// If the passed-in file is an empty string then this function returns
// immediately.
//
// If a file of the same name with a ".err" extension exists then this Wait
// will end with a skipError.
//
// Cancellation of ctx maps to entrypoint.ErrContextCanceled and an expired
// deadline to entrypoint.ErrContextDeadlineExceeded.
func (rw *realWaiter) Wait(ctx context.Context, file string, expectContent bool, breakpointOnFailure bool) error {
	// Nothing to wait for.
	if file == "" {
		return nil
	}
	// Poll until the file appears (with content when required), a ".err"
	// marker appears, or the context ends.
	for {
		if info, err := os.Stat(file); err == nil {
			if !expectContent || info.Size() > 0 {
				// File exists, and has content if content was required.
				return nil
			}
		} else if !os.IsNotExist(err) {
			// Stat failed for a reason other than "not created yet".
			return fmt.Errorf("waiting for %q: %w", file, err)
		}
		// When a .err file is read by this step, it means that a previous step has failed
		// We wouldn't want this step to stop executing because the previous step failed during debug
		// That is counterproductive to debugging
		// Hence we disable skipError here so that the other steps in the failed taskRun can continue
		// executing if breakpointOnFailure is enabled for the taskRun
		// TLDR: Do not return skipError when breakpointOnFailure is enabled as it breaks execution of the TaskRun
		if _, err := os.Stat(file + ".err"); err == nil {
			if breakpointOnFailure {
				return nil
			}
			return entrypoint.ErrSkipPreviousStepFailed
		}
		select {
		case <-ctx.Done():
			if errors.Is(ctx.Err(), context.Canceled) {
				return entrypoint.ErrContextCanceled
			}
			if errors.Is(ctx.Err(), context.DeadlineExceeded) {
				return entrypoint.ErrContextDeadlineExceeded
			}
			// Context ended for some other reason; stop without error.
			return nil
		case <-time.After(rw.waitPollingInterval):
			// Sleep one polling interval before re-checking.
		}
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
"net/http"
"os"
"github.com/tektoncd/pipeline/pkg/reconciler/notifications/customrun"
"knative.dev/pkg/injection/sharedmain"
)
// eventsControllerName is the component name passed to sharedmain for the
// CustomRun events controller.
const eventsControllerName = "events-controller"

func main() {
	// sets up liveness and readiness probes.
	probes := http.NewServeMux()
	for _, path := range []string{"/", "/health", "/readiness"} {
		probes.HandleFunc(path, handler)
	}
	// Probe port is overridable via PROBES_PORT; default to 8080.
	port := os.Getenv("PROBES_PORT")
	if port == "" {
		port = "8080"
	}
	go func() {
		// start the web server on port and accept requests
		log.Printf("Readiness and health check server listening on port %s", port)
		log.Fatal(http.ListenAndServe(":"+port, probes)) // #nosec G114 -- see https://github.com/securego/gosec#available-rules
	}()
	// start the events controller
	sharedmain.Main(eventsControllerName, customrun.NewController())
}
func handler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
"os"
"os/signal"
"syscall"
)
// main exits immediately unless invoked with the "tekton_run_indefinitely"
// argument, in which case it blocks until SIGINT or SIGTERM arrives.
func main() {
	args := os.Args
	if len(args) >= 2 && args[1] == "tekton_run_indefinitely" {
		log.Println("Waiting indefinitely...")
		// Block until the OS delivers an interrupt or termination signal.
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
		log.Println("received signal:", <-sigs)
	}
	log.Println("Exiting...")
	os.Exit(0)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"log"
"os"
"strconv"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/bundle"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/cluster"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/git"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/http"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/hub"
hubresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/hub"
"k8s.io/client-go/rest"
filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/signals"
)
// main starts all built-in remote resolver controllers (git, hub, bundle,
// cluster, http) under a single shared main.
func main() {
	// Allow worker-thread tuning via environment before flag registration;
	// the flag below can still override it.
	if val, ok := os.LookupEnv("THREADS_PER_CONTROLLER"); ok {
		threadsPerController, err := strconv.Atoi(val)
		if err != nil {
			log.Fatalf("failed to parse value %q of THREADS_PER_CONTROLLER: %v\n", val, err)
		}
		controller.DefaultThreadsPerController = threadsPerController
	}
	flag.IntVar(&controller.DefaultThreadsPerController, "threads-per-controller", controller.DefaultThreadsPerController, "Threads (goroutines) to create per controller")
	// Restrict informers to resolution-managed resources.
	ctx := filteredinformerfactory.WithSelectors(signals.NewContext(), v1alpha1.ManagedByLabelKey)
	// Tekton Hub has no default URL (empty fallback); Artifact Hub falls
	// back to its public API endpoint.
	tektonHubURL := buildHubURL(os.Getenv("TEKTON_HUB_API"), "")
	artifactHubURL := buildHubURL(os.Getenv("ARTIFACT_HUB_API"), hubresolution.DefaultArtifactHubURL)
	// This parses flags.
	cfg := injection.ParseAndGetRESTConfigOrDie()
	if cfg.QPS == 0 {
		cfg.QPS = 2 * rest.DefaultQPS
	}
	if cfg.Burst == 0 {
		cfg.Burst = rest.DefaultBurst
	}
	// multiply by no of controllers being created
	cfg.QPS = 5 * cfg.QPS
	cfg.Burst = 5 * cfg.Burst
	sharedmain.MainWithConfig(ctx, "controller", cfg,
		framework.NewController(ctx, &git.Resolver{}),
		framework.NewController(ctx, &hub.Resolver{TektonHubURL: tektonHubURL, ArtifactHubURL: artifactHubURL}),
		framework.NewController(ctx, &bundle.Resolver{}),
		framework.NewController(ctx, &cluster.Resolver{}),
		framework.NewController(ctx, &http.Resolver{}))
}
// buildHubURL picks configAPI when it is set, otherwise defaultURL, and
// normalizes the result by dropping a single trailing "/".
func buildHubURL(configAPI, defaultURL string) string {
	url := configAPI
	if url == "" {
		url = defaultURL
	}
	return strings.TrimSuffix(url, "/")
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"flag"
"log"
"os"
"os/signal"
"strings"
"syscall"
"github.com/tektoncd/pipeline/internal/sidecarlogresults"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/pod"
)
// main runs the results sidecar: it scrapes task/step results and step
// artifacts from the run directories and prints them to stdout so the
// controller can read them from the container logs. With
// -kubernetes-sidecar-mode it then blocks until SIGINT/SIGTERM so the
// native sidecar container keeps running.
func main() {
	var resultsDir string
	var resultNames string
	var stepResultsStr string
	var stepNames string
	var kubernetesNativeSidecar bool
	flag.StringVar(&resultsDir, "results-dir", pipeline.DefaultResultPath, "Path to the results directory. Default is /tekton/results")
	flag.StringVar(&resultNames, "result-names", "", "comma separated result names to expect from the steps running in the pod. eg. foo,bar,baz")
	flag.StringVar(&stepResultsStr, "step-results", "", "json containing a map of step Name as key and list of result Names. eg. {\"stepName\":[\"foo\",\"bar\",\"baz\"]}")
	flag.StringVar(&stepNames, "step-names", "", "comma separated step names. eg. foo,bar,baz")
	flag.BoolVar(&kubernetesNativeSidecar, "kubernetes-sidecar-mode", false, "If true, wait indefinitely after processing results (for Kubernetes native sidecar support)")
	flag.Parse()
	var done chan bool
	// If kubernetesNativeSidecar is true, wait indefinitely to prevent container from exiting
	// This is needed for Kubernetes native sidecar support
	if kubernetesNativeSidecar {
		// Set up signal handling for graceful shutdown.
		// Create a channel to receive OS signals.
		sigCh := make(chan os.Signal, 1)
		// Register the channel to receive notifications for SIGINT (Ctrl+C)
		// and SIGTERM.
		signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
		// Create a channel to signal that the program should exit gracefully.
		done = make(chan bool, 1)
		// Start a goroutine to handle incoming signals.
		go func() {
			<-sigCh      // Block until a signal is received.
			done <- true // Signal that cleanup is done and the program can exit.
		}()
	}
	var expectedResults []string
	// strings.Split returns [""] instead of [] for empty string, we don't want pass [""] to other methods.
	if len(resultNames) > 0 {
		expectedResults = strings.Split(resultNames, ",")
	}
	expectedStepResults := map[string][]string{}
	// Only decode when the flag was actually provided: json.Unmarshal of the
	// empty default value fails with "unexpected end of JSON input", which
	// would abort the sidecar even when no step results are expected.
	if stepResultsStr != "" {
		if err := json.Unmarshal([]byte(stepResultsStr), &expectedStepResults); err != nil {
			log.Fatal(err)
		}
	}
	if err := sidecarlogresults.LookForResults(os.Stdout, pod.RunDir, resultsDir, expectedResults, pipeline.StepsDir, expectedStepResults); err != nil {
		log.Fatal(err)
	}
	var names []string
	if len(stepNames) > 0 {
		names = strings.Split(stepNames, ",")
	}
	if err := sidecarlogresults.LookForArtifacts(os.Stdout, names, pod.RunDir); err != nil {
		log.Fatal(err)
	}
	if kubernetesNativeSidecar && done != nil {
		// Wait for a signal to be received.
		<-done
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"log"
"net/http"
"os"
"strings"
defaultconfig "github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/apis/resolution"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
pkgleaderelection "knative.dev/pkg/leaderelection"
"knative.dev/pkg/logging"
"knative.dev/pkg/signals"
"knative.dev/pkg/system"
"knative.dev/pkg/webhook"
"knative.dev/pkg/webhook/certificates"
"knative.dev/pkg/webhook/configmaps"
"knative.dev/pkg/webhook/resourcesemantics"
"knative.dev/pkg/webhook/resourcesemantics/conversion"
"knative.dev/pkg/webhook/resourcesemantics/defaulting"
"knative.dev/pkg/webhook/resourcesemantics/validation"
)
// types enumerates every CRD served by the defaulting and validation
// admission webhooks, keyed by GroupVersionKind.
var types = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{
	// v1alpha1
	v1alpha1.SchemeGroupVersion.WithKind("VerificationPolicy"): &v1alpha1.VerificationPolicy{},
	v1alpha1.SchemeGroupVersion.WithKind("StepAction"):         &v1alpha1.StepAction{},
	// v1beta1
	v1beta1.SchemeGroupVersion.WithKind("Pipeline"):    &v1beta1.Pipeline{},
	v1beta1.SchemeGroupVersion.WithKind("Task"):        &v1beta1.Task{},
	v1beta1.SchemeGroupVersion.WithKind("TaskRun"):     &v1beta1.TaskRun{},
	v1beta1.SchemeGroupVersion.WithKind("PipelineRun"): &v1beta1.PipelineRun{},
	v1beta1.SchemeGroupVersion.WithKind("CustomRun"):   &v1beta1.CustomRun{},
	v1beta1.SchemeGroupVersion.WithKind("StepAction"):  &v1beta1.StepAction{},
	// v1
	v1.SchemeGroupVersion.WithKind("Task"):        &v1.Task{},
	v1.SchemeGroupVersion.WithKind("Pipeline"):    &v1.Pipeline{},
	v1.SchemeGroupVersion.WithKind("TaskRun"):     &v1.TaskRun{},
	v1.SchemeGroupVersion.WithKind("PipelineRun"): &v1.PipelineRun{},
	// resolution
	// v1alpha1
	resolutionv1alpha1.SchemeGroupVersion.WithKind("ResolutionRequest"): &resolutionv1alpha1.ResolutionRequest{},
	// v1beta1
	resolutionv1beta1.SchemeGroupVersion.WithKind("ResolutionRequest"): &resolutionv1beta1.ResolutionRequest{},
}
// newDefaultingAdmissionController returns a constructor (in the shape
// sharedmain expects) for the defaulting admission webhook, served at
// /defaulting for all CRDs listed in types.
func newDefaultingAdmissionController(name string) func(context.Context, configmap.Watcher) *controller.Impl {
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		// Decorate contexts with the current state of the config.
		store := defaultconfig.NewStore(logging.FromContext(ctx).Named("config-store"))
		store.WatchConfigs(cmw)
		return defaulting.NewAdmissionController(ctx,
			// Name of the resource webhook, it is the value of the environment variable WEBHOOK_ADMISSION_CONTROLLER_NAME
			// default is "webhook.pipeline.tekton.dev"
			name,
			// The path on which to serve the webhook.
			"/defaulting",
			// The resources to validate and default.
			types,
			// A function that infuses the context passed to Validate/SetDefaults with custom metadata.
			func(ctx context.Context) context.Context {
				return store.ToContext(ctx)
			},
			// Whether to disallow unknown fields.
			true,
		)
	}
}
// newValidationAdmissionController returns a constructor (in the shape
// sharedmain expects) for the validating admission webhook, registered as
// "validation.<name>" and served at /resource-validation for all CRDs in
// types.
func newValidationAdmissionController(name string) func(context.Context, configmap.Watcher) *controller.Impl {
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		// Keep validation decisions in sync with the live Tekton config.
		store := defaultconfig.NewStore(logging.FromContext(ctx).Named("config-store"))
		store.WatchConfigs(cmw)
		// "validation." prefix on the value of WEBHOOK_ADMISSION_CONTROLLER_NAME;
		// default is "validation.webhook.pipeline.tekton.dev".
		webhookName := strings.Join([]string{"validation", name}, ".")
		// Infuse the context handed to Validate with the current config state.
		contextDecorator := func(ctx context.Context) context.Context {
			return store.ToContext(ctx)
		}
		// Final argument: disallow unknown fields in submitted resources.
		return validation.NewAdmissionController(ctx, webhookName, "/resource-validation", types, contextDecorator, true)
	}
}
// newConfigValidationController returns a constructor (in the shape
// sharedmain expects) for the webhook that validates edits to Tekton's
// configmaps, registered as "config.<name>" and served at /config-validation.
func newConfigValidationController(name string) func(context.Context, configmap.Watcher) *controller.Impl {
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		// Each configmap is checked by attempting to parse it with its
		// corresponding constructor.
		validatedConfigs := configmap.Constructors{
			logging.ConfigMapName():                   logging.NewConfigFromConfigMap,
			defaultconfig.GetDefaultsConfigName():     defaultconfig.NewDefaultsFromConfigMap,
			pkgleaderelection.ConfigMapName():         pkgleaderelection.NewConfigFromConfigMap,
			defaultconfig.GetFeatureFlagsConfigName(): defaultconfig.NewFeatureFlagsFromConfigMap,
		}
		// "config." prefix on the value of WEBHOOK_ADMISSION_CONTROLLER_NAME;
		// default is "config.webhook.pipeline.tekton.dev".
		return configmaps.NewAdmissionController(ctx,
			strings.Join([]string{"config", name}, "."),
			"/config-validation",
			validatedConfigs,
		)
	}
}
// newConversionController builds the webhook controller that converts stored
// Tekton CRDs between API versions, served at /resource-conversion.
func newConversionController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
	var (
		v1alpha1GroupVersion           = v1alpha1.SchemeGroupVersion.Version
		v1beta1GroupVersion            = v1beta1.SchemeGroupVersion.Version
		v1GroupVersion                 = v1.SchemeGroupVersion.Version
		resolutionv1alpha1GroupVersion = resolutionv1alpha1.SchemeGroupVersion.Version
		resolutionv1beta1GroupVersion  = resolutionv1beta1.SchemeGroupVersion.Version
	)
	// Decorate contexts with the current state of the config.
	store := defaultconfig.NewStore(logging.FromContext(ctx).Named("config-store"))
	store.WatchConfigs(cmw)
	return conversion.NewConversionController(ctx,
		// The path on which to serve the webhook
		"/resource-conversion",
		// Specify the types of custom resource definitions that should be converted
		// "HubVersion" specifies which version of the CustomResource supports
		// conversions to and from all types.
		// "Zygotes" are the supported versions.
		map[schema.GroupKind]conversion.GroupKindConversion{
			// StepAction: hub version is v1alpha1.
			v1beta1.Kind("StepAction"): {
				DefinitionName: pipeline.StepActionResource.String(),
				HubVersion:     v1alpha1GroupVersion,
				Zygotes: map[string]conversion.ConvertibleObject{
					v1alpha1GroupVersion: &v1alpha1.StepAction{},
					v1beta1GroupVersion:  &v1beta1.StepAction{},
				},
			},
			// Task/Pipeline/TaskRun/PipelineRun: hub version is v1beta1.
			v1.Kind("Task"): {
				DefinitionName: pipeline.TaskResource.String(),
				HubVersion:     v1beta1GroupVersion,
				Zygotes: map[string]conversion.ConvertibleObject{
					v1beta1GroupVersion: &v1beta1.Task{},
					v1GroupVersion:      &v1.Task{},
				},
			},
			v1.Kind("Pipeline"): {
				DefinitionName: pipeline.PipelineResource.String(),
				HubVersion:     v1beta1GroupVersion,
				Zygotes: map[string]conversion.ConvertibleObject{
					v1beta1GroupVersion: &v1beta1.Pipeline{},
					v1GroupVersion:      &v1.Pipeline{},
				},
			},
			v1.Kind("TaskRun"): {
				DefinitionName: pipeline.TaskRunResource.String(),
				HubVersion:     v1beta1GroupVersion,
				Zygotes: map[string]conversion.ConvertibleObject{
					v1beta1GroupVersion: &v1beta1.TaskRun{},
					v1GroupVersion:      &v1.TaskRun{},
				},
			},
			v1.Kind("PipelineRun"): {
				DefinitionName: pipeline.PipelineRunResource.String(),
				HubVersion:     v1beta1GroupVersion,
				Zygotes: map[string]conversion.ConvertibleObject{
					v1beta1GroupVersion: &v1beta1.PipelineRun{},
					v1GroupVersion:      &v1.PipelineRun{},
				},
			},
			// ResolutionRequest: hub version is v1alpha1.
			resolutionv1beta1.Kind("ResolutionRequest"): {
				DefinitionName: resolution.ResolutionRequestResource.String(),
				HubVersion:     resolutionv1alpha1GroupVersion,
				Zygotes: map[string]conversion.ConvertibleObject{
					resolutionv1alpha1GroupVersion: &resolutionv1alpha1.ResolutionRequest{},
					resolutionv1beta1GroupVersion:  &resolutionv1beta1.ResolutionRequest{},
				},
			},
		},
		// A function that infuses the context passed to ConvertTo/ConvertFrom/SetDefaults with custom metadata
		func(ctx context.Context) context.Context {
			return store.ToContext(ctx)
		},
	)
}
// main configures and starts the Tekton webhook server: certificate
// management, defaulting, validation, configmap validation, and CRD
// conversion controllers, plus a plain-HTTP probe endpoint.
func main() {
	// Service, secret, and webhook names are overridable via environment.
	serviceName := os.Getenv("WEBHOOK_SERVICE_NAME")
	if serviceName == "" {
		serviceName = "tekton-pipelines-webhook"
	}
	secretName := os.Getenv("WEBHOOK_SECRET_NAME")
	if secretName == "" {
		secretName = "webhook-certs" // #nosec
	}
	webhookName := os.Getenv("WEBHOOK_ADMISSION_CONTROLLER_NAME")
	if webhookName == "" {
		webhookName = "webhook.pipeline.tekton.dev"
	}
	// Drop the resource_namespace metric tag unless explicitly enabled.
	var statsReporterOptions []webhook.StatsReporterOption
	enableNamespace := os.Getenv("WEBHOOK_METRICS_ENABLE_NAMESPACE")
	if enableNamespace != "true" {
		statsReporterOptions = append(statsReporterOptions, webhook.WithoutTags("resource_namespace"))
	}
	// Scope informers to the webhook's namespace instead of cluster-wide
	ctx := injection.WithNamespaceScope(signals.NewContext(), system.Namespace())
	// Set up a signal context with our webhook options
	ctx = webhook.WithOptions(ctx, webhook.Options{
		ServiceName:          serviceName,
		Port:                 webhook.PortFromEnv(8443),
		SecretName:           secretName,
		StatsReporterOptions: statsReporterOptions,
	})
	// Liveness/readiness probes are served over plain HTTP, separate from
	// the TLS webhook port.
	mux := http.NewServeMux()
	mux.HandleFunc("/", handler)
	mux.HandleFunc("/health", handler)
	mux.HandleFunc("/readiness", handler)
	port := os.Getenv("PROBES_PORT")
	if port == "" {
		port = "8080"
	}
	go func() {
		// start the web server on port and accept requests
		log.Printf("Readiness and health check server listening on port %s", port)
		log.Fatal(http.ListenAndServe(":"+port, mux)) // #nosec G114 -- see https://github.com/securego/gosec#available-rules
	}()
	// Run all webhook controllers under one shared main.
	sharedmain.MainWithContext(ctx, serviceName,
		certificates.NewController,
		newDefaultingAdmissionController(webhookName),
		newValidationAdmissionController(webhookName),
		newConfigValidationController(webhookName),
		newConversionController,
	)
}
func handler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
"os"
"path/filepath"
"runtime"
"strings"
)
// main creates each directory passed as an argument, provided its cleaned
// path is relative or lies under /workspace. Paths outside the workspace
// are silently skipped; a failed mkdir aborts the process.
func main() {
	// The workspace root is the same for every argument; compute it and its
	// separator-suffixed prefix once instead of on every loop iteration.
	ws := cleanPath("/workspace/")
	prefix := ws + string(filepath.Separator)
	// os.Args[0] is the path to this executable, so we skip it.
	for _, d := range os.Args[1:] {
		p := cleanPath(d)
		// Only create relative paths or paths inside the workspace root.
		if !filepath.IsAbs(p) || strings.HasPrefix(p, prefix) {
			if err := os.MkdirAll(p, 0755); err != nil {
				log.Fatalf("Failed to mkdir %q: %v", p, err)
			}
		}
	}
}
func cleanPath(path string) string {
p := filepath.Clean(path)
if runtime.GOOS == "windows" {
// Append 'C:' if the path is absolute (i.e. it begins with a single '\')
if strings.HasPrefix(p, "\\") && !strings.HasPrefix(p, "\\\\") {
p = "C:" + p
}
}
return p
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"errors"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
"knative.dev/pkg/injection/sharedmain"
)
// main runs the demo resolver as its own controller process, watching only
// resources labeled as managed by the resolution machinery.
func main() {
	ctx := filteredinformerfactory.WithSelectors(context.Background(), v1beta1.ManagedByLabelKey)
	sharedmain.MainWithContext(ctx, "controller",
		framework.NewController(ctx, &resolver{}),
	)
}
// Deprecated: this demo resolver is built on the legacy resolution
// framework. NOTE(review): the original comment said only "Deprecated" with
// no replacement named — presumably the remote-resolution framework demo
// supersedes it; confirm before relying on this note.
type resolver struct{}

// Initialize sets up any dependencies needed by the resolver. None atm.
func (r *resolver) Initialize(context.Context) error {
	return nil
}

// GetName returns a string name to refer to this resolver by.
func (r *resolver) GetName(context.Context) string {
	return "Demo"
}

// GetSelector returns a map of labels to match requests to this resolver.
func (r *resolver) GetSelector(context.Context) map[string]string {
	return map[string]string{
		common.LabelKeyResolverType: "demo",
	}
}

// ValidateParams ensures parameters from a request are as expected: this
// demo accepts no parameters at all.
func (r *resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	if len(params) > 0 {
		return errors.New("no params allowed")
	}
	return nil
}

// Resolve uses the given params to resolve the requested file or resource.
// It always returns the hard-coded demo Pipeline below.
func (r *resolver) Resolve(ctx context.Context, params []pipelinev1.Param) (framework.ResolvedResource, error) {
	return &myResolvedResource{}, nil
}
// our hard-coded resolved file to return
const pipeline = `
apiVersion: tekton.dev/v1
kind: Pipeline
metadata:
  name: my-pipeline
spec:
  tasks:
  - name: hello-world
    taskSpec:
      steps:
      - image: alpine:3.15.1
        script: |
          echo "hello world"
`

// myResolvedResource wraps the data we want to return to Pipelines
type myResolvedResource struct{}

// Data returns the bytes of our hard-coded Pipeline
func (*myResolvedResource) Data() []byte {
	return []byte(pipeline)
}

// Annotations returns any metadata needed alongside the data. None atm.
func (*myResolvedResource) Annotations() map[string]string {
	return nil
}

// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint. None atm.
func (*myResolvedResource) RefSource() *pipelinev1.RefSource {
	return nil
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"errors"
"fmt"
neturl "net/url"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/resolution/common"
frameworkV1 "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
"knative.dev/pkg/injection/sharedmain"
)
// main runs the remote-resolution demo resolver as its own controller
// process, watching only resources labeled as managed by the resolution
// machinery.
func main() {
	ctx := filteredinformerfactory.WithSelectors(context.Background(), v1beta1.ManagedByLabelKey)
	sharedmain.MainWithContext(ctx, "controller",
		framework.NewController(ctx, &resolver{}),
	)
}
// resolver is a demo implementation of the remote-resolution framework's
// resolver interface.
type resolver struct{}

// Initialize sets up any dependencies needed by the resolver. None atm.
func (r *resolver) Initialize(context.Context) error {
	return nil
}

// GetName returns a string name to refer to this resolver by.
func (r *resolver) GetName(context.Context) string {
	return "Demo"
}

// GetSelector returns a map of labels to match requests to this resolver.
func (r *resolver) GetSelector(context.Context) map[string]string {
	return map[string]string{
		common.LabelKeyResolverType: "demo",
	}
}
// Validate ensures the resolution spec from a request is as expected: no
// params are allowed, and the URL must parse and use the "demoscheme"
// scheme.
func (r *resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
	if len(req.Params) > 0 {
		return errors.New("no params allowed")
	}
	u, err := neturl.ParseRequestURI(req.URL)
	if err != nil {
		return err
	}
	if u.Scheme != "demoscheme" {
		// Lowercase, unpunctuated error text per Go convention (staticcheck ST1005).
		return fmt.Errorf("invalid scheme: want %q, got %q", "demoscheme", u.Scheme)
	}
	return nil
}
// Resolve uses the given resolution spec to resolve the requested file or
// resource. It always returns the hard-coded demo Pipeline below.
func (r *resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (frameworkV1.ResolvedResource, error) {
	return &myResolvedResource{}, nil
}
// our hard-coded resolved file to return
const pipeline = `
apiVersion: tekton.dev/v1
kind: Pipeline
metadata:
  name: my-pipeline
spec:
  tasks:
  - name: hello-world
    taskSpec:
      steps:
      - image: alpine:3.15.1
        script: |
          echo "hello world"
`

// myResolvedResource wraps the data we want to return to Pipelines
type myResolvedResource struct{}

// Data returns the bytes of our hard-coded Pipeline
func (*myResolvedResource) Data() []byte {
	return []byte(pipeline)
}

// Annotations returns any metadata needed alongside the data. None atm.
func (*myResolvedResource) Annotations() map[string]string {
	return nil
}

// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint. None atm.
func (*myResolvedResource) RefSource() *pipelinev1.RefSource {
	return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"strings"
tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"k8s.io/klog"
"k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
func main() {
pipelinesVersion := flag.String("version", "v0.17.2", "Tekton Pipelines software version")
apiVersion := flag.String("apiVersion", "v1beta1", "API version")
flag.Parse()
version := *pipelinesVersion
if !strings.HasPrefix(version, "v") {
version = "v" + version
}
var oAPIDefs map[string]common.OpenAPIDefinition
switch *apiVersion {
case "v1alpha1":
oAPIDefs = tektonv1alpha1.GetOpenAPIDefinitions(func(name string) spec.Ref {
return spec.MustCreateRef("#/definitions/" + common.EscapeJsonPointer(swaggify(name)))
})
case "v1beta1":
oAPIDefs = tektonv1beta1.GetOpenAPIDefinitions(func(name string) spec.Ref {
return spec.MustCreateRef("#/definitions/" + common.EscapeJsonPointer(swaggify(name)))
})
case "v1":
oAPIDefs = tektonv1.GetOpenAPIDefinitions(func(name string) spec.Ref {
return spec.MustCreateRef("#/definitions/" + common.EscapeJsonPointer(swaggify(name)))
})
default:
panic("Unsupported API version: " + *apiVersion)
}
defs := spec.Definitions{}
for defName, val := range oAPIDefs {
defs[swaggify(defName)] = val.Schema
}
swagger := spec.Swagger{
SwaggerProps: spec.SwaggerProps{
Swagger: "2.0",
Definitions: defs,
Paths: &spec.Paths{Paths: map[string]spec.PathItem{}},
Info: &spec.Info{
InfoProps: spec.InfoProps{
Title: "Tekton",
Description: "Tekton Pipeline",
Version: version,
},
},
},
}
jsonBytes, err := json.MarshalIndent(swagger, "", " ")
if err != nil {
klog.Fatal(err.Error())
}
fmt.Println(string(jsonBytes))
}
// swaggify converts a fully qualified Go type name into the short,
// dot-separated name used for swagger definitions. Package prefixes that
// carry no information in the generated document are stripped first, then
// any remaining path separators are rewritten as dots. The rules are applied
// sequentially, in order: the final "/" -> "." pass must run last so that
// the "knative/" replacements also end up dot-separated.
func swaggify(name string) string {
	rules := [][2]string{
		{"./pkg/apis/pipeline/", ""},
		{"./pkg/apis/resource/", ""},
		{"github.com/tektoncd/pipeline/pkg/apis/pipeline/", ""},
		{"github.com/tektoncd/pipeline/pkg/apis/resolution/", ""},
		{"github.com/tektoncd/pipeline/pkg/apis/resource/", ""},
		{"k8s.io/api/core/", ""},
		{"k8s.io/apimachinery/pkg/apis/meta/", ""},
		{"knative.dev/pkg/apis.", "knative/"},
		{"knative.dev/pkg/apis/duck/v1beta1.", "knative/"},
		{"/", "."},
	}
	for _, rule := range rules {
		name = strings.ReplaceAll(name, rule[0], rule[1])
	}
	return name
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sidecarlogresults
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/result"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
)
// ErrSizeExceeded indicates that the result exceeded its maximum allowed size
var (
	ErrSizeExceeded = errors.New("results size exceeds configured limit")
	// stepDir is the base directory under which each step's files
	// (including its artifacts) are written.
	stepDir = pipeline.StepsDir
)

// SidecarLogResultType distinguishes the kinds of entries the results
// sidecar writes to its log stream.
type SidecarLogResultType string

const (
	// taskResultType marks a task-level result.
	taskResultType SidecarLogResultType = "task"
	// stepResultType marks a step-level result.
	stepResultType SidecarLogResultType = "step"

	// stepArtifactType marks artifacts produced by a single step.
	stepArtifactType SidecarLogResultType = "stepArtifact"
	// taskArtifactType marks artifacts produced at the task level.
	taskArtifactType SidecarLogResultType = "taskArtifact"

	// sidecarResultNameSeparator joins a step name and a result name into
	// a single sidecar result name: <stepName>.<resultName>.
	sidecarResultNameSeparator string = "."
)

// SidecarLogResult holds fields for storing extracted results
type SidecarLogResult struct {
	Name  string               `json:"name"`
	Value string               `json:"value"`
	Type  SidecarLogResultType `json:"type"`
}
// fileExists reports whether filename exists and is not a directory.
// A missing file is not treated as an error: it returns (false, nil),
// which lets callers poll for the file's appearance.
func fileExists(filename string) (bool, error) {
	info, err := os.Stat(filename)
	switch {
	case os.IsNotExist(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("error checking for file existence %w", err)
	}
	// Directories exist on disk but do not count as files here.
	return !info.IsDir(), nil
}
// encode JSON-serializes v and writes it, newline-terminated, to w.
func encode(w io.Writer, v any) error {
	enc := json.NewEncoder(w)
	return enc.Encode(v)
}
// waitForStepsToFinish polls runDir until every step has finished, sleeping
// sleepInterval between passes. Each subdirectory of runDir is one step; a
// step is finished once its "out" file exists. If a step's "out.err" file
// appears instead, the step errored and no later steps will run, so the
// function returns nil to let the caller log whatever results exist.
func waitForStepsToFinish(runDir string, sleepInterval time.Duration) error {
	entries, err := os.ReadDir(runDir)
	if err != nil {
		return fmt.Errorf("error parsing the run dir %w", err)
	}
	pending := make(map[string]bool, len(entries))
	for _, entry := range entries {
		pending[filepath.Join(runDir, entry.Name(), "out")] = true
	}
	for len(pending) > 0 {
		for outFile := range pending {
			// A plain "out" file means the step completed successfully.
			done, err := fileExists(outFile)
			if err != nil {
				return fmt.Errorf("error checking for out file's existence %w", err)
			}
			if done {
				delete(pending, outFile)
				continue
			}
			// "out.err" marks a failed step; subsequent steps will not run,
			// so stop waiting (nil error unless the stat itself failed).
			if failed, err := fileExists(outFile + ".err"); failed || err != nil {
				return err
			}
		}
		if sleepInterval > 0 {
			time.Sleep(sleepInterval)
		}
	}
	return nil
}
func createSidecarResultName(stepName, resultName string) string {
return fmt.Sprintf("%s%s%s", stepName, sidecarResultNameSeparator, resultName)
}
// ExtractStepAndResultFromSidecarResultName splits the result name to extract the step
// and result name from it. It only works if the format is <stepName>.<resultName>.
// The split happens on the first separator only, so result names containing
// further dots are preserved intact.
func ExtractStepAndResultFromSidecarResultName(sidecarResultName string) (string, string, error) {
	splitString := strings.SplitN(sidecarResultName, sidecarResultNameSeparator, 2)
	if len(splitString) != 2 {
		// Fixed typo in the user-facing message ("somtthing" -> "something").
		return "", "", fmt.Errorf("invalid string %s : expected something that looks like <stepName>.<resultName>", sidecarResultName)
	}
	return splitString[0], splitString[1], nil
}
// readResults loads a single result file from resultsDir and wraps it in a
// SidecarLogResult. A missing file yields a zero-value result (Name == "")
// with no error, which callers use as a skip signal. Step results get the
// <stepName>.<resultName> naming scheme.
func readResults(resultsDir, resultFile, stepName string, resultType SidecarLogResultType) (SidecarLogResult, error) {
	raw, err := os.ReadFile(filepath.Join(resultsDir, resultFile))
	switch {
	case os.IsNotExist(err):
		return SidecarLogResult{}, nil
	case err != nil:
		return SidecarLogResult{}, fmt.Errorf("error reading the results file %w", err)
	}
	name := resultFile
	if resultType == stepResultType {
		name = createSidecarResultName(stepName, resultFile)
	}
	return SidecarLogResult{
		Name:  name,
		Value: string(raw),
		Type:  resultType,
	}, nil
}
// LookForResults waits for results to be written out by the steps
// in their results path and prints them in a structured way to its
// stdout so that the reconciler can parse those logs.
//
// Task results are read from resultsDir/<name> for each entry of
// resultNames; step results are read from
// stepResultsDir/<stepName>/results/<name> for each entry of stepResults.
// Reads happen concurrently and are funneled through a channel so that all
// writes to w occur from this goroutine.
func LookForResults(w io.Writer, runDir string, resultsDir string, resultNames []string, stepResultsDir string, stepResults map[string][]string) error {
	interval, err := getSidecarLogPollingInterval()
	if err != nil {
		return fmt.Errorf("error getting polling interval: %w", err)
	}
	// Block until every step has written its "out" (or "out.err") marker.
	if err := waitForStepsToFinish(runDir, interval); err != nil {
		return fmt.Errorf("error while waiting for the steps to finish %w", err)
	}
	// Unbuffered: producers block until the range loop below consumes.
	results := make(chan SidecarLogResult)
	g := new(errgroup.Group)
	// One goroutine per task-level result file.
	for _, resultFile := range resultNames {
		g.Go(func() error {
			newResult, err := readResults(resultsDir, resultFile, "", taskResultType)
			if err != nil {
				return err
			}
			// An empty Name means the result file did not exist; skip it.
			if newResult.Name == "" {
				return nil
			}
			results <- newResult
			return nil
		})
	}
	// One goroutine per step-level result file.
	for sName, sresults := range stepResults {
		for _, resultName := range sresults {
			// Shadow with the per-step directory so the closure captures the
			// right path for this iteration.
			stepResultsDir := filepath.Join(stepResultsDir, sName, "results")
			g.Go(func() error {
				newResult, err := readResults(stepResultsDir, resultName, sName, stepResultType)
				if err != nil {
					return err
				}
				if newResult.Name == "" {
					return nil
				}
				results <- newResult
				return nil
			})
		}
	}
	// A separate group waits for all producers and then closes the channel,
	// which terminates the consuming range loop below.
	channelGroup := new(errgroup.Group)
	channelGroup.Go(func() error {
		if err := g.Wait(); err != nil {
			return fmt.Errorf("error parsing results: %w", err)
		}
		close(results)
		return nil
	})
	// Single consumer: serialize every result to w as one JSON line each.
	for result := range results {
		if err := encode(w, result); err != nil {
			return fmt.Errorf("error writing results: %w", err)
		}
	}
	if err := channelGroup.Wait(); err != nil {
		return err
	}
	return nil
}
// LookForArtifacts searches for and processes artifacts within a specified run directory.
// It looks for "provenance.json" files within the "artifacts" subdirectory of each named step.
// If the provenance file exists, the function extracts artifact information, formats it into a
// JSON string, and encodes it for output alongside relevant metadata (step name, artifact type).
func LookForArtifacts(w io.Writer, names []string, runDir string) error {
	interval, err := getSidecarLogPollingInterval()
	if err != nil {
		return fmt.Errorf("error getting polling interval: %w", err)
	}
	if err := waitForStepsToFinish(runDir, interval); err != nil {
		return err
	}
	for _, stepName := range names {
		provenancePath := filepath.Join(stepDir, stepName, "artifacts", "provenance.json")
		// Steps without a provenance file simply produced no artifacts.
		exists, err := fileExists(provenancePath)
		if err != nil {
			return err
		}
		if !exists {
			continue
		}
		artifacts, err := extractArtifactsFromFile(provenancePath)
		if err != nil {
			return err
		}
		encoded, err := json.Marshal(&artifacts)
		if err != nil {
			return err
		}
		logResult := SidecarLogResult{Name: stepName, Value: string(encoded), Type: stepArtifactType}
		if err := encode(w, logResult); err != nil {
			return err
		}
	}
	return nil
}
// GetResultsFromSidecarLogs extracts results from the logs of the results sidecar
//
// It streams the named container's logs from the API server and parses one
// JSON-encoded SidecarLogResult per line. Each parsed line is bounded by the
// MaxResultSize feature flag taken from ctx.
func GetResultsFromSidecarLogs(ctx context.Context, clientset kubernetes.Interface, namespace string, name string, container string, podPhase corev1.PodPhase) ([]result.RunResult, error) {
	sidecarLogResults := []result.RunResult{}
	// A pending pod has not produced any logs yet; return the empty set.
	if podPhase == corev1.PodPending {
		return sidecarLogResults, nil
	}
	podLogOpts := corev1.PodLogOptions{Container: container}
	req := clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts)
	sidecarLogs, err := req.Stream(ctx)
	if err != nil {
		return sidecarLogResults, err
	}
	defer sidecarLogs.Close()
	maxResultLimit := config.FromContextOrDefaults(ctx).FeatureFlags.MaxResultSize
	return extractResultsFromLogs(sidecarLogs, sidecarLogResults, maxResultLimit)
}
// extractResultsFromLogs scans logs line by line, parses each line as a
// sidecar result, and appends it to sidecarLogResults. Lines longer than
// maxResultLimit make the scanner fail with bufio.ErrTooLong, which is
// surfaced as ErrSizeExceeded together with the results gathered so far.
func extractResultsFromLogs(logs io.Reader, sidecarLogResults []result.RunResult, maxResultLimit int) ([]result.RunResult, error) {
	scanner := bufio.NewScanner(logs)
	scanner.Buffer(make([]byte, maxResultLimit), maxResultLimit)
	for scanner.Scan() {
		parsed, err := parseResults(scanner.Bytes(), maxResultLimit)
		if err != nil {
			return nil, err
		}
		sidecarLogResults = append(sidecarLogResults, parsed)
	}
	if err := scanner.Err(); err != nil {
		if errors.Is(err, bufio.ErrTooLong) {
			return sidecarLogResults, ErrSizeExceeded
		}
		return nil, err
	}
	return sidecarLogResults, nil
}
// parseResults unmarshals a single sidecar log line into a result.RunResult.
// It enforces the configured maximum result size and maps the sidecar result
// type onto the corresponding result.ResultType.
func parseResults(resultBytes []byte, maxResultLimit int) (result.RunResult, error) {
	runResult := result.RunResult{}
	var res SidecarLogResult
	if err := json.Unmarshal(resultBytes, &res); err != nil {
		return runResult, fmt.Errorf("invalid result \"%s\": %w", res.Name, err)
	}
	// The size check runs against the raw line so oversized results are
	// rejected even when they unmarshal successfully.
	if len(resultBytes) > maxResultLimit {
		return runResult, fmt.Errorf("invalid result \"%s\": %w of %d", res.Name, ErrSizeExceeded, maxResultLimit)
	}
	var resultType result.ResultType
	switch res.Type {
	case taskResultType:
		resultType = result.TaskRunResultType
	case stepResultType:
		resultType = result.StepResultType
	case stepArtifactType:
		resultType = result.StepArtifactsResultType
	case taskArtifactType:
		resultType = result.TaskRunArtifactsResultType
	default:
		// List every accepted type; the previous message omitted taskArtifactType
		// even though the switch above accepts it.
		return result.RunResult{}, fmt.Errorf("invalid sidecar result type %v. Must be %v or %v or %v or %v", res.Type, taskResultType, stepResultType, stepArtifactType, taskArtifactType)
	}
	runResult = result.RunResult{
		Key:        res.Name,
		Value:      res.Value,
		ResultType: resultType,
	}
	return runResult, nil
}
// parseArtifacts unmarshals a JSON document into a v1.Artifacts value.
func parseArtifacts(fileContent []byte) (v1.Artifacts, error) {
	var artifacts v1.Artifacts
	err := json.Unmarshal(fileContent, &artifacts)
	if err != nil {
		return artifacts, fmt.Errorf("invalid artifacts : %w", err)
	}
	return artifacts, nil
}
// extractArtifactsFromFile reads filename (a step's provenance.json) and
// parses its content as v1.Artifacts.
func extractArtifactsFromFile(filename string) (v1.Artifacts, error) {
	b, err := os.ReadFile(filename)
	if err != nil {
		// Message fixed: this reads the artifacts file, not a results file.
		return v1.Artifacts{}, fmt.Errorf("error reading the artifacts file %w", err)
	}
	return parseArtifacts(b)
}
// getSidecarLogPollingInterval reads the SIDECAR_LOG_POLLING_INTERVAL
// environment variable and parses it as a time.Duration. An unset variable
// yields the 100ms default; an unparsable value yields the default alongside
// the parse error.
func getSidecarLogPollingInterval() (time.Duration, error) {
	const fallback = 100 * time.Millisecond
	raw := os.Getenv("SIDECAR_LOG_POLLING_INTERVAL")
	if raw == "" {
		return fallback, nil
	}
	parsed, err := time.ParseDuration(raw)
	if err != nil {
		return fallback, err
	}
	return parsed, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/yaml"
)
const (
	// DefaultTimeoutMinutes is used when no timeout is specified.
	DefaultTimeoutMinutes = 60
	// NoTimeoutDuration is used when a pipeline or task should never time out.
	NoTimeoutDuration = 0 * time.Minute
	// DefaultServiceAccountValue is the SA used when one is not specified.
	DefaultServiceAccountValue = "default"
	// DefaultManagedByLabelValue is the value for the managed-by label that is used by default.
	DefaultManagedByLabelValue = "tekton-pipelines"
	// DefaultCloudEventSinkValue is the default value for cloud event sinks.
	DefaultCloudEventSinkValue = ""
	// DefaultMaxMatrixCombinationsCount is used when no max matrix combinations count is specified.
	DefaultMaxMatrixCombinationsCount = 256
	// DefaultResolverTypeValue is used when no default resolver type is specified
	DefaultResolverTypeValue = ""
	// ResourceRequirementDefaultContainerKey is the "default" key in the
	// container resource requirements map; its requirements are applied to
	// every container that has empty resource requirements.
	ResourceRequirementDefaultContainerKey = "default"
	// DefaultImagePullBackOffTimeout is used when no imagepullbackoff timeout is specified (disabled).
	DefaultImagePullBackOffTimeout = 0 * time.Minute
	// DefaultMaximumResolutionTimeout is the maximum resolution timeout used by the
	// resolution controller before timing out when exceeded.
	DefaultMaximumResolutionTimeout = 1 * time.Minute
	// DefaultSidecarLogPollingInterval is how frequently the sidecar log results
	// container polls for step completion files when no interval is configured.
	DefaultSidecarLogPollingInterval = 100 * time.Millisecond
	// DefaultStepRefConcurrencyLimit is the default concurrency limit for resolving step references.
	DefaultStepRefConcurrencyLimit = 5

	// Keys of the corresponding entries in the config-defaults ConfigMap.
	defaultTimeoutMinutesKey                = "default-timeout-minutes"
	defaultServiceAccountKey                = "default-service-account"
	defaultManagedByLabelValueKey           = "default-managed-by-label-value"
	defaultPodTemplateKey                   = "default-pod-template"
	defaultAAPodTemplateKey                 = "default-affinity-assistant-pod-template"
	defaultCloudEventsSinkKey               = "default-cloud-events-sink"
	defaultTaskRunWorkspaceBinding          = "default-task-run-workspace-binding"
	defaultMaxMatrixCombinationsCountKey    = "default-max-matrix-combinations-count"
	defaultForbiddenEnv                     = "default-forbidden-env"
	defaultResolverTypeKey                  = "default-resolver-type"
	defaultContainerResourceRequirementsKey = "default-container-resource-requirements"
	defaultImagePullBackOffTimeout          = "default-imagepullbackoff-timeout"
	defaultMaximumResolutionTimeout         = "default-maximum-resolution-timeout"
	defaultSidecarLogPollingIntervalKey     = "default-sidecar-log-polling-interval"
	// DefaultStepRefConcurrencyLimitKey is the ConfigMap key for the step-ref concurrency limit.
	DefaultStepRefConcurrencyLimitKey = "default-step-ref-concurrency-limit"
)

// DefaultConfig holds all the default configurations for the config.
var DefaultConfig, _ = NewDefaultsFromMap(map[string]string{})

// Defaults holds the default configurations
// +k8s:deepcopy-gen=true
type Defaults struct {
	DefaultTimeoutMinutes                int
	DefaultServiceAccount                string
	DefaultManagedByLabelValue           string
	DefaultPodTemplate                   *pod.Template
	DefaultAAPodTemplate                 *pod.AffinityAssistantTemplate
	DefaultCloudEventsSink               string // Deprecated. Use the events package instead
	DefaultTaskRunWorkspaceBinding       string
	DefaultMaxMatrixCombinationsCount    int
	DefaultForbiddenEnv                  []string
	DefaultResolverType                  string
	DefaultContainerResourceRequirements map[string]corev1.ResourceRequirements
	DefaultImagePullBackOffTimeout       time.Duration
	DefaultMaximumResolutionTimeout      time.Duration
	// DefaultSidecarLogPollingInterval specifies how frequently (as a time.Duration) the Tekton sidecar log results container polls for step completion files.
	// This value is loaded from the 'sidecar-log-polling-interval' key in the config-defaults ConfigMap.
	// It is used to control the responsiveness and resource usage of the sidecar in both production and test environments.
	DefaultSidecarLogPollingInterval time.Duration
	DefaultStepRefConcurrencyLimit   int
}
// GetDefaultsConfigName returns the name of the configmap containing all
// defined defaults. The CONFIG_DEFAULTS_NAME environment variable overrides
// the built-in "config-defaults" name.
func GetDefaultsConfigName() string {
	name := os.Getenv("CONFIG_DEFAULTS_NAME")
	if name == "" {
		return "config-defaults"
	}
	return name
}
// Equals returns true if two Configs are identical
func (cfg *Defaults) Equals(other *Defaults) bool {
	// Two nil configs are equal; a nil and a non-nil config are not.
	if cfg == nil && other == nil {
		return true
	}
	if cfg == nil || other == nil {
		return false
	}
	// Field-by-field comparison. Pod templates compare via their own Equals
	// (nil-safe); the forbidden-env slice needs DeepEqual. Note that
	// DefaultContainerResourceRequirements is not part of the comparison.
	return other.DefaultTimeoutMinutes == cfg.DefaultTimeoutMinutes &&
		other.DefaultServiceAccount == cfg.DefaultServiceAccount &&
		other.DefaultManagedByLabelValue == cfg.DefaultManagedByLabelValue &&
		other.DefaultPodTemplate.Equals(cfg.DefaultPodTemplate) &&
		other.DefaultAAPodTemplate.Equals(cfg.DefaultAAPodTemplate) &&
		other.DefaultCloudEventsSink == cfg.DefaultCloudEventsSink &&
		other.DefaultTaskRunWorkspaceBinding == cfg.DefaultTaskRunWorkspaceBinding &&
		other.DefaultMaxMatrixCombinationsCount == cfg.DefaultMaxMatrixCombinationsCount &&
		other.DefaultResolverType == cfg.DefaultResolverType &&
		other.DefaultImagePullBackOffTimeout == cfg.DefaultImagePullBackOffTimeout &&
		other.DefaultMaximumResolutionTimeout == cfg.DefaultMaximumResolutionTimeout &&
		other.DefaultSidecarLogPollingInterval == cfg.DefaultSidecarLogPollingInterval &&
		other.DefaultStepRefConcurrencyLimit == cfg.DefaultStepRefConcurrencyLimit &&
		reflect.DeepEqual(other.DefaultForbiddenEnv, cfg.DefaultForbiddenEnv)
}
// NewDefaultsFromMap returns a Config given a map corresponding to a ConfigMap.
// It starts from the package-level default values and overrides each field
// whose key is present in cfgMap, returning an error when a present value
// fails to parse.
func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) {
	tc := Defaults{
		DefaultTimeoutMinutes:             DefaultTimeoutMinutes,
		DefaultServiceAccount:             DefaultServiceAccountValue,
		DefaultManagedByLabelValue:        DefaultManagedByLabelValue,
		DefaultCloudEventsSink:            DefaultCloudEventSinkValue,
		DefaultMaxMatrixCombinationsCount: DefaultMaxMatrixCombinationsCount,
		DefaultResolverType:               DefaultResolverTypeValue,
		DefaultImagePullBackOffTimeout:    DefaultImagePullBackOffTimeout,
		DefaultMaximumResolutionTimeout:   DefaultMaximumResolutionTimeout,
		DefaultSidecarLogPollingInterval:  DefaultSidecarLogPollingInterval,
		DefaultStepRefConcurrencyLimit:    DefaultStepRefConcurrencyLimit,
	}
	if defaultTimeoutMin, ok := cfgMap[defaultTimeoutMinutesKey]; ok {
		timeout, err := strconv.ParseInt(defaultTimeoutMin, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", defaultTimeoutMinutesKey)
		}
		tc.DefaultTimeoutMinutes = int(timeout)
	}
	if defaultServiceAccount, ok := cfgMap[defaultServiceAccountKey]; ok {
		tc.DefaultServiceAccount = defaultServiceAccount
	}
	if defaultManagedByLabelValue, ok := cfgMap[defaultManagedByLabelValueKey]; ok {
		tc.DefaultManagedByLabelValue = defaultManagedByLabelValue
	}
	if defaultPodTemplate, ok := cfgMap[defaultPodTemplateKey]; ok {
		var podTemplate pod.Template
		if err := yamlUnmarshal(defaultPodTemplate, defaultPodTemplateKey, &podTemplate); err != nil {
			return nil, fmt.Errorf("failed to unmarshal %v", defaultPodTemplate)
		}
		tc.DefaultPodTemplate = &podTemplate
	}
	if defaultAAPodTemplate, ok := cfgMap[defaultAAPodTemplateKey]; ok {
		var podTemplate pod.AffinityAssistantTemplate
		if err := yamlUnmarshal(defaultAAPodTemplate, defaultAAPodTemplateKey, &podTemplate); err != nil {
			return nil, fmt.Errorf("failed to unmarshal %v", defaultAAPodTemplate)
		}
		tc.DefaultAAPodTemplate = &podTemplate
	}
	if defaultCloudEventsSink, ok := cfgMap[defaultCloudEventsSinkKey]; ok {
		tc.DefaultCloudEventsSink = defaultCloudEventsSink
	}
	if bindingYAML, ok := cfgMap[defaultTaskRunWorkspaceBinding]; ok {
		tc.DefaultTaskRunWorkspaceBinding = bindingYAML
	}
	if defaultMaxMatrixCombinationsCount, ok := cfgMap[defaultMaxMatrixCombinationsCountKey]; ok {
		matrixCombinationsCount, err := strconv.ParseInt(defaultMaxMatrixCombinationsCount, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", defaultMaxMatrixCombinationsCountKey)
		}
		tc.DefaultMaxMatrixCombinationsCount = int(matrixCombinationsCount)
	}
	if defaultForbiddenEnvString, ok := cfgMap[defaultForbiddenEnv]; ok {
		// A string set deduplicates entries and yields a sorted list.
		tmpString := sets.NewString()
		fEnvs := strings.Split(defaultForbiddenEnvString, ",")
		for _, fEnv := range fEnvs {
			tmpString.Insert(strings.TrimSpace(fEnv))
		}
		tc.DefaultForbiddenEnv = tmpString.List()
	}
	if defaultResolverType, ok := cfgMap[defaultResolverTypeKey]; ok {
		tc.DefaultResolverType = defaultResolverType
	}
	if resourceRequirementsStringValue, ok := cfgMap[defaultContainerResourceRequirementsKey]; ok {
		resourceRequirementsValue := make(map[string]corev1.ResourceRequirements)
		if err := yamlUnmarshal(resourceRequirementsStringValue, defaultContainerResourceRequirementsKey, &resourceRequirementsValue); err != nil {
			return nil, fmt.Errorf("failed to unmarshal %v", resourceRequirementsStringValue)
		}
		tc.DefaultContainerResourceRequirements = resourceRequirementsValue
	}
	if defaultImagePullBackOff, ok := cfgMap[defaultImagePullBackOffTimeout]; ok {
		timeout, err := time.ParseDuration(defaultImagePullBackOff)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", defaultImagePullBackOffTimeout)
		}
		tc.DefaultImagePullBackOffTimeout = timeout
	}
	if defaultMaximumResolutionTimeout, ok := cfgMap[defaultMaximumResolutionTimeout]; ok {
		timeout, err := time.ParseDuration(defaultMaximumResolutionTimeout)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", defaultMaximumResolutionTimeout)
		}
		tc.DefaultMaximumResolutionTimeout = timeout
	}
	if defaultSidecarPollingInterval, ok := cfgMap[defaultSidecarLogPollingIntervalKey]; ok {
		interval, err := time.ParseDuration(defaultSidecarPollingInterval)
		if err != nil {
			// Report the config key, not the raw value, for consistency with
			// every other parse error in this function.
			return nil, fmt.Errorf("failed parsing default config %q", defaultSidecarLogPollingIntervalKey)
		}
		tc.DefaultSidecarLogPollingInterval = interval
	}
	// Lower-cased local name: the previous code shadowed the exported
	// DefaultStepRefConcurrencyLimit constant here.
	if stepRefConcurrencyLimitStr, ok := cfgMap[DefaultStepRefConcurrencyLimitKey]; ok {
		stepRefConcurrencyLimit, err := strconv.ParseInt(stepRefConcurrencyLimitStr, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", DefaultStepRefConcurrencyLimitKey)
		}
		tc.DefaultStepRefConcurrencyLimit = int(stepRefConcurrencyLimit)
	}
	return &tc, nil
}
// yamlUnmarshal decodes the YAML string s into o. It first attempts a strict
// decode (unknown fields are errors); on failure it logs a warning keyed by
// the config entry name and falls back to a non-strict decode so that
// unexpected extra keys do not break config loading.
func yamlUnmarshal(s string, key string, o interface{}) error {
	b := []byte(s)
	if err := yaml.UnmarshalStrict(b, o); err != nil {
		log.Printf("warning: failed to decode %q: %q. Trying decode with non-strict mode", key, err)
		return yaml.Unmarshal(b, o)
	}
	return nil
}
// NewDefaultsFromConfigMap returns a Config built from the data section of
// the given configmap.
func NewDefaultsFromConfigMap(config *corev1.ConfigMap) (*Defaults, error) {
	data := config.Data
	return NewDefaultsFromMap(data)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"errors"
"os"
"sort"
"strings"
corev1 "k8s.io/api/core/v1"
)
const (
	// FormatTektonV1 represents the "v1" events in Tekton custom format
	FormatTektonV1 EventFormat = "tektonv1"
	// DefaultSink is the default value for "sink"
	DefaultSink = ""

	// ConfigMap keys for the events configuration.
	formatsKey = "formats"
	sinkKey    = "sink"
)

var (
	// TODO(afrittoli): only one valid format for now, more to come
	// See TEP-0137 https://github.com/tektoncd/community/pull/1028
	validFormats = EventFormats{FormatTektonV1: struct{}{}}
	// DefaultFormats is the default value for "formats"
	DefaultFormats = EventFormats{FormatTektonV1: struct{}{}}
	// DefaultEvents holds the events configuration built from an empty map,
	// i.e. all defaults.
	DefaultEvents, _ = NewEventsFromMap(map[string]string{})
)

// Events holds the events configurations
// +k8s:deepcopy-gen=true
type Events struct {
	Sink    string
	Formats EventFormats
}

// EventFormat is a single event format
type EventFormat string

// EventFormats is a set of event formats
type EventFormats map[EventFormat]struct{}
// String renders the event format as its underlying string value.
func (ef EventFormat) String() string {
	s := string(ef)
	return s
}
// IsValid returns true if the EventFormat is one of the valid ones
func (ef EventFormat) IsValid() bool {
	// Validity is defined by membership in the package-level validFormats set.
	_, ok := validFormats[ef]
	return ok
}
// String renders the set of event formats as a sorted, comma separated list.
// Sorting keeps the output deterministic (map iteration order is random),
// which also helps with testing.
func (efs EventFormats) String() string {
	formats := make([]string, 0, len(efs))
	for format := range efs {
		formats = append(formats, format.String())
	}
	sort.Strings(formats)
	return strings.Join(formats, ",")
}
// Equals defines identity between EventFormats: same size, same members.
func (efs EventFormats) Equals(other EventFormats) bool {
	if len(efs) != len(other) {
		return false
	}
	for key := range efs {
		_, found := other[key]
		if !found {
			return false
		}
	}
	return true
}
// ParseEventFormats converts a comma separated list into a EventFormats set.
// It rejects an empty input, any unknown format, and any duplicated entry.
func ParseEventFormats(formats string) (EventFormats, error) {
	// An empty string is not a valid configuration
	if formats == "" {
		return EventFormats{}, errors.New("formats cannot be empty")
	}
	names := strings.Split(formats, ",")
	parsed := make(EventFormats, len(names))
	for _, name := range names {
		format := EventFormat(name)
		if !format.IsValid() {
			return EventFormats{}, errors.New("invalid format: " + name)
		}
		// If already in the map (duplicate), fail
		if _, seen := parsed[format]; seen {
			return EventFormats{}, errors.New("duplicate format: " + name)
		}
		parsed[format] = struct{}{}
	}
	return parsed, nil
}
// GetEventsConfigName returns the name of the configmap containing the
// events configuration. The CONFIG_EVENTS_NAME environment variable
// overrides the built-in "config-events" name.
func GetEventsConfigName() string {
	name := os.Getenv("CONFIG_EVENTS_NAME")
	if name == "" {
		return "config-events"
	}
	return name
}
// NewEventsFromMap returns a Config given a map corresponding to a ConfigMap.
// Formats fall back to DefaultFormats and the sink falls back to DefaultSink
// when their keys are absent.
func NewEventsFromMap(cfgMap map[string]string) (*Events, error) {
	events := Events{}
	if err := setFormats(cfgMap, DefaultFormats, &events.Formats); err != nil {
		return nil, err
	}
	if sink, ok := cfgMap[sinkKey]; ok {
		events.Sink = sink
	} else {
		events.Sink = DefaultSink
	}
	return &events, nil
}
// setFormats stores the parsed "formats" entry of cfgMap into field, falling
// back to defaultValue when the key is absent. A present-but-invalid value is
// an error.
func setFormats(cfgMap map[string]string, defaultValue EventFormats, field *EventFormats) error {
	if raw, ok := cfgMap[formatsKey]; ok {
		parsed, err := ParseEventFormats(raw)
		if err != nil {
			return err
		}
		*field = parsed
		return nil
	}
	*field = defaultValue
	return nil
}
// NewEventsFromConfigMap returns a Config built from the data section of the
// given configmap.
func NewEventsFromConfigMap(config *corev1.ConfigMap) (*Events, error) {
	data := config.Data
	return NewEventsFromMap(data)
}
// Equals returns true if two Configs are identical. Two nil configs are
// equal; a nil and a non-nil config are not.
func (cfg *Events) Equals(other *Events) bool {
	switch {
	case cfg == nil && other == nil:
		return true
	case cfg == nil || other == nil:
		return false
	}
	return cfg.Sink == other.Sink &&
		cfg.Formats.Equals(other.Formats)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"strconv"
"strings"
)
const (
// StableAPIFields is the value used for API-driven features of stable stability level.
StableAPIFields = "stable"
// AlphaAPIFields is the value used for API-driven features of alpha stability level.
AlphaAPIFields = "alpha"
// BetaAPIFields is the value used for API-driven features of beta stability level.
BetaAPIFields = "beta"
// Features of "alpha" stability level are disabled by default
DefaultAlphaFeatureEnabled = false
// Features of "beta" stability level are disabled by default
DefaultBetaFeatureEnabled = false
// Features of "stable" stability level are enabled by default
DefaultStableFeatureEnabled = true
// FailNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to fail TaskRun or PipelineRun
// when no matching policies are found
FailNoMatchPolicy = "fail"
// WarnNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to log warning and skip verification
// when no matching policies are found
WarnNoMatchPolicy = "warn"
// IgnoreNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to skip verification
// when no matching policies are found
IgnoreNoMatchPolicy = "ignore"
// CoscheduleWorkspaces is the value used for "coschedule" to coschedule PipelineRun Pods sharing the same PVC workspaces to the same node
CoscheduleWorkspaces = "workspaces"
// CoschedulePipelineRuns is the value used for "coschedule" to coschedule all PipelineRun Pods to the same node
CoschedulePipelineRuns = "pipelineruns"
// CoscheduleIsolatePipelineRun is the value used for "coschedule" to coschedule all PipelineRun Pods to the same node, and only allows one PipelineRun to run on a node at a time
CoscheduleIsolatePipelineRun = "isolate-pipelinerun"
// CoscheduleDisabled is the value used for "coschedule" to disabled PipelineRun Pods coschedule
CoscheduleDisabled = "disabled"
// ResultExtractionMethodTerminationMessage is the value used for "results-from" as a way to extract results from tasks using kubernetes termination message.
ResultExtractionMethodTerminationMessage = "termination-message"
// ResultExtractionMethodSidecarLogs is the value used for "results-from" as a way to extract results from tasks using sidecar logs.
ResultExtractionMethodSidecarLogs = "sidecar-logs"
// DefaultDisableCredsInit is the default value for "disable-creds-init".
DefaultDisableCredsInit = false
// DefaultRunningInEnvWithInjectedSidecars is the default value for "running-in-environment-with-injected-sidecars".
DefaultRunningInEnvWithInjectedSidecars = true
// DefaultAwaitSidecarReadiness is the default value for "await-sidecar-readiness".
DefaultAwaitSidecarReadiness = true
// DefaultDisableInlineSpec is the default value of "disable-inline-spec"
DefaultDisableInlineSpec = ""
// DefaultRequireGitSSHSecretKnownHosts is the default value for "require-git-ssh-secret-known-hosts".
DefaultRequireGitSSHSecretKnownHosts = false
// DefaultEnableTektonOciBundles is the default value for "enable-tekton-oci-bundles".
DefaultEnableTektonOciBundles = false
// DefaultEnableAPIFields is the default value for "enable-api-fields".
DefaultEnableAPIFields = BetaAPIFields
// DefaultSendCloudEventsForRuns is the default value for "send-cloudevents-for-runs".
DefaultSendCloudEventsForRuns = false
// EnforceNonfalsifiabilityWithSpire is the value used for "enable-nonfalsifiability" when SPIRE is used to enable non-falsifiability.
EnforceNonfalsifiabilityWithSpire = "spire"
// EnforceNonfalsifiabilityNone is the value used for "enable-nonfalsifiability" when non-falsifiability is not enabled.
EnforceNonfalsifiabilityNone = "none"
// DefaultEnforceNonfalsifiability is the default value for "enforce-nonfalsifiability".
DefaultEnforceNonfalsifiability = EnforceNonfalsifiabilityNone
// DefaultNoMatchPolicyConfig is the default value for "trusted-resources-verification-no-match-policy".
DefaultNoMatchPolicyConfig = IgnoreNoMatchPolicy
// DefaultEnableProvenanceInStatus is the default value for "enable-provenance-status".
DefaultEnableProvenanceInStatus = true
// DefaultResultExtractionMethod is the default value for ResultExtractionMethod
DefaultResultExtractionMethod = ResultExtractionMethodTerminationMessage
// DefaultMaxResultSize is the default value in bytes for the size of a result
DefaultMaxResultSize = 4096
// DefaultSetSecurityContext is the default value for "set-security-context"
DefaultSetSecurityContext = false
// DefaultSetSecurityContextReadOnlyRootFilesystem is the default value for "set-security-context-read-only-root-filesystem"
DefaultSetSecurityContextReadOnlyRootFilesystem = false
// DefaultCoschedule is the default value for coschedule
DefaultCoschedule = CoscheduleWorkspaces
// KeepPodOnCancel is the flag used to enable cancelling a pod using the entrypoint, and keep pod on cancel
KeepPodOnCancel = "keep-pod-on-cancel"
// EnableCELInWhenExpression is the flag to enabled CEL in WhenExpression
EnableCELInWhenExpression = "enable-cel-in-whenexpression"
// EnableArtifacts is the flag to enable the use of Artifacts in Steps
EnableArtifacts = "enable-artifacts"
// EnableParamEnum is the flag to enabled enum in params
EnableParamEnum = "enable-param-enum"
// EnableConciseResolverSyntax is the flag to enable concise resolver syntax
EnableConciseResolverSyntax = "enable-concise-resolver-syntax"
// EnableKubernetesSidecar is the flag to enable kubernetes sidecar support
EnableKubernetesSidecar = "enable-kubernetes-sidecar"
// DefaultEnableKubernetesSidecar is the default value for EnableKubernetesSidecar
DefaultEnableKubernetesSidecar = false
// EnableWaitExponentialBackoff is the flag to enable exponential backoff strategy
EnableWaitExponentialBackoff = "enable-wait-exponential-backoff"
// DefaultEnableWaitExponentialBackoff is the default value for EnableWaitExponentialBackoff
DefaultEnableWaitExponentialBackoff = false
// EnableStepActions is the flag to enable step actions (no-op since it's stable)
EnableStepActions = "enable-step-actions"
// DisableInlineSpec is the flag to disable embedded spec
// in Taskrun or Pipelinerun
DisableInlineSpec = "disable-inline-spec"
disableCredsInitKey = "disable-creds-init"
runningInEnvWithInjectedSidecarsKey = "running-in-environment-with-injected-sidecars"
awaitSidecarReadinessKey = "await-sidecar-readiness"
requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" //nolint:gosec
// enableTektonOCIBundles = "enable-tekton-oci-bundles"
enableAPIFields = "enable-api-fields"
sendCloudEventsForRuns = "send-cloudevents-for-runs"
enforceNonfalsifiability = "enforce-nonfalsifiability"
verificationNoMatchPolicy = "trusted-resources-verification-no-match-policy"
enableProvenanceInStatus = "enable-provenance-in-status"
resultExtractionMethod = "results-from"
maxResultSize = "max-result-size"
setSecurityContextKey = "set-security-context"
setSecurityContextReadOnlyRootFilesystemKey = "set-security-context-read-only-root-filesystem"
coscheduleKey = "coschedule"
)
// DefaultFeatureFlags holds all the default configurations for the feature flags configmap.
var (
	// DefaultFeatureFlags is FeatureFlags with every flag at its default.
	// The error is deliberately discarded: an empty map falls back to the
	// package defaults, which are expected to always validate.
	DefaultFeatureFlags, _ = NewFeatureFlagsFromMap(map[string]string{})

	// DefaultEnableKeepPodOnCancel is the default PerFeatureFlag value for "keep-pod-on-cancel"
	DefaultEnableKeepPodOnCancel = PerFeatureFlag{
		Name:      KeepPodOnCancel,
		Stability: BetaAPIFields,
		Enabled:   DefaultBetaFeatureEnabled,
	}

	// DefaultEnableCELInWhenExpression is the default PerFeatureFlag value for EnableCELInWhenExpression
	DefaultEnableCELInWhenExpression = PerFeatureFlag{
		Name:      EnableCELInWhenExpression,
		Stability: AlphaAPIFields,
		Enabled:   DefaultAlphaFeatureEnabled,
	}

	// DefaultEnableArtifacts is the default PerFeatureFlag value for EnableArtifacts
	DefaultEnableArtifacts = PerFeatureFlag{
		Name:      EnableArtifacts,
		Stability: AlphaAPIFields,
		Enabled:   DefaultAlphaFeatureEnabled,
	}

	// DefaultEnableParamEnum is the default PerFeatureFlag value for EnableParamEnum
	DefaultEnableParamEnum = PerFeatureFlag{
		Name:      EnableParamEnum,
		Stability: AlphaAPIFields,
		Enabled:   DefaultAlphaFeatureEnabled,
	}

	// DefaultEnableConciseResolverSyntax is the default PerFeatureFlag value for EnableConciseResolverSyntax
	DefaultEnableConciseResolverSyntax = PerFeatureFlag{
		Name:      EnableConciseResolverSyntax,
		Stability: AlphaAPIFields,
		Enabled:   DefaultAlphaFeatureEnabled,
	}
)
// FeatureFlags holds the features configurations.
// Each field mirrors one key of the feature-flags ConfigMap; see
// NewFeatureFlagsFromMap for the key names, parsing, and defaults.
// +k8s:deepcopy-gen=true
type FeatureFlags struct {
	DisableCredsInit                 bool   `json:"disableCredsInit,omitempty"`
	RunningInEnvWithInjectedSidecars bool   `json:"runningInEnvWithInjectedSidecars,omitempty"`
	RequireGitSSHSecretKnownHosts    bool   `json:"requireGitSSHSecretKnownHosts,omitempty"`
	// EnableAPIFields is the stability level of enabled API surface,
	// one of AlphaAPIFields, BetaAPIFields or StableAPIFields.
	EnableAPIFields          string `json:"enableAPIFields,omitempty"`
	SendCloudEventsForRuns   bool   `json:"sendCloudEventsForRuns,omitempty"`
	AwaitSidecarReadiness    bool   `json:"awaitSidecarReadiness,omitempty"`
	// EnforceNonfalsifiability is either EnforceNonfalsifiabilityNone or
	// EnforceNonfalsifiabilityWithSpire (see setEnforceNonFalsifiability).
	EnforceNonfalsifiability string `json:"enforceNonfalsifiability,omitempty"`
	EnableKeepPodOnCancel    bool   `json:"enableKeepPodOnCancel,omitempty"`
	// VerificationNoMatchPolicy is the feature flag for "trusted-resources-verification-no-match-policy"
	// VerificationNoMatchPolicy can be set to "ignore", "warn" and "fail" values.
	// ignore: skip trusted resources verification when no matching verification policies found
	// warn: skip trusted resources verification when no matching verification policies found and log a warning
	// fail: fail the taskrun or pipelines run if no matching verification policies found
	VerificationNoMatchPolicy string `json:"verificationNoMatchPolicy,omitempty"`
	EnableProvenanceInStatus  bool   `json:"enableProvenanceInStatus,omitempty"`
	// ResultExtractionMethod is one of ResultExtractionMethodTerminationMessage
	// or ResultExtractionMethodSidecarLogs (see setResultExtractionMethod).
	ResultExtractionMethod string `json:"resultExtractionMethod,omitempty"`
	// MaxResultSize is the maximum result size in bytes; validated to stay
	// below the ~1.5 MB CRD limit (see setMaxResultSize).
	MaxResultSize                            int    `json:"maxResultSize,omitempty"`
	SetSecurityContext                       bool   `json:"setSecurityContext,omitempty"`
	SetSecurityContextReadOnlyRootFilesystem bool   `json:"setSecurityContextReadOnlyRootFilesystem,omitempty"`
	// Coschedule is one of the Coschedule* constants (see setCoschedule).
	Coschedule                string `json:"coschedule,omitempty"`
	EnableCELInWhenExpression bool   `json:"enableCELInWhenExpression,omitempty"`
	// EnableStepActions is a no-op flag since StepActions are stable
	EnableStepActions bool `json:"enableStepActions,omitempty"`
	EnableParamEnum   bool `json:"enableParamEnum,omitempty"`
	EnableArtifacts   bool `json:"enableArtifacts,omitempty"`
	// DisableInlineSpec is a raw string (presumably a comma-separated list of
	// resource kinds — verify against callers); it is stored unparsed.
	DisableInlineSpec            string `json:"disableInlineSpec,omitempty"`
	EnableConciseResolverSyntax  bool   `json:"enableConciseResolverSyntax,omitempty"`
	EnableKubernetesSidecar      bool   `json:"enableKubernetesSidecar,omitempty"`
	EnableWaitExponentialBackoff bool   `json:"enableWaitExponentialBackoff,omitempty"`
}
// GetFeatureFlagsConfigName returns the name of the configmap containing all
// feature flags. The CONFIG_FEATURE_FLAGS_NAME environment variable overrides
// the built-in default of "feature-flags".
func GetFeatureFlagsConfigName() string {
	name := os.Getenv("CONFIG_FEATURE_FLAGS_NAME")
	if name == "" {
		name = "feature-flags"
	}
	return name
}
// NewFeatureFlagsFromMap returns a FeatureFlags populated from a map
// corresponding to the feature-flags ConfigMap. Keys absent from cfgMap fall
// back to their documented defaults; any value that fails to parse or
// validate aborts with an error.
func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) {
	// parseBool resolves a plain boolean flag, using fallback when the key is
	// absent from the map.
	parseBool := func(key string, fallback bool, dst *bool) error {
		raw, found := cfgMap[key]
		if !found {
			*dst = fallback
			return nil
		}
		parsed, err := strconv.ParseBool(raw)
		if err != nil {
			return fmt.Errorf("failed parsing feature flags config %q: %w", raw, err)
		}
		*dst = parsed
		return nil
	}
	// parsePerFeature is like parseBool, but takes its default from a
	// PerFeatureFlag and names the feature in the parse error.
	parsePerFeature := func(key string, fallback PerFeatureFlag, dst *bool) error {
		raw, found := cfgMap[key]
		if !found {
			*dst = fallback.Enabled
			return nil
		}
		parsed, err := strconv.ParseBool(raw)
		if err != nil {
			return fmt.Errorf("failed parsing feature flags config %q: %w for feature %s", raw, err, key)
		}
		*dst = parsed
		return nil
	}
	var ff FeatureFlags
	// Resolve every flag in a fixed order; the first failure aborts the whole
	// conversion. EnableStepActions is deliberately never read from the map:
	// step actions are stable and the flag is a no-op.
	steps := []func() error{
		func() error { return parseBool(disableCredsInitKey, DefaultDisableCredsInit, &ff.DisableCredsInit) },
		func() error {
			return parseBool(runningInEnvWithInjectedSidecarsKey, DefaultRunningInEnvWithInjectedSidecars, &ff.RunningInEnvWithInjectedSidecars)
		},
		func() error { return parseBool(awaitSidecarReadinessKey, DefaultAwaitSidecarReadiness, &ff.AwaitSidecarReadiness) },
		func() error {
			return parseBool(requireGitSSHSecretKnownHostsKey, DefaultRequireGitSSHSecretKnownHosts, &ff.RequireGitSSHSecretKnownHosts)
		},
		func() error { return setEnabledAPIFields(cfgMap, DefaultEnableAPIFields, &ff.EnableAPIFields) },
		func() error { return parseBool(sendCloudEventsForRuns, DefaultSendCloudEventsForRuns, &ff.SendCloudEventsForRuns) },
		func() error { return setVerificationNoMatchPolicy(cfgMap, DefaultNoMatchPolicyConfig, &ff.VerificationNoMatchPolicy) },
		func() error { return parseBool(enableProvenanceInStatus, DefaultEnableProvenanceInStatus, &ff.EnableProvenanceInStatus) },
		func() error { return setResultExtractionMethod(cfgMap, DefaultResultExtractionMethod, &ff.ResultExtractionMethod) },
		func() error { return setMaxResultSize(cfgMap, DefaultMaxResultSize, &ff.MaxResultSize) },
		func() error { return parsePerFeature(KeepPodOnCancel, DefaultEnableKeepPodOnCancel, &ff.EnableKeepPodOnCancel) },
		func() error { return setEnforceNonFalsifiability(cfgMap, &ff.EnforceNonfalsifiability) },
		func() error { return parseBool(setSecurityContextKey, DefaultSetSecurityContext, &ff.SetSecurityContext) },
		func() error {
			return parseBool(setSecurityContextReadOnlyRootFilesystemKey, DefaultSetSecurityContextReadOnlyRootFilesystem, &ff.SetSecurityContextReadOnlyRootFilesystem)
		},
		func() error { return setCoschedule(cfgMap, DefaultCoschedule, &ff.Coschedule) },
		func() error { return parsePerFeature(EnableCELInWhenExpression, DefaultEnableCELInWhenExpression, &ff.EnableCELInWhenExpression) },
		func() error { return parsePerFeature(EnableParamEnum, DefaultEnableParamEnum, &ff.EnableParamEnum) },
		func() error { return parsePerFeature(EnableArtifacts, DefaultEnableArtifacts, &ff.EnableArtifacts) },
		func() error { return setFeatureInlineSpec(cfgMap, DisableInlineSpec, DefaultDisableInlineSpec, &ff.DisableInlineSpec) },
		func() error {
			return parsePerFeature(EnableConciseResolverSyntax, DefaultEnableConciseResolverSyntax, &ff.EnableConciseResolverSyntax)
		},
		func() error { return parseBool(EnableKubernetesSidecar, DefaultEnableKubernetesSidecar, &ff.EnableKubernetesSidecar) },
		func() error { return parseBool(EnableWaitExponentialBackoff, DefaultEnableWaitExponentialBackoff, &ff.EnableWaitExponentialBackoff) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return nil, err
		}
	}
	return &ff, nil
}
// setEnabledAPIFields sets the "enable-api-fields" flag based on the content
// of a given map. The raw value is lower-cased before validation; anything
// other than AlphaAPIFields, BetaAPIFields or StableAPIFields is rejected.
func setEnabledAPIFields(cfgMap map[string]string, defaultValue string, feature *string) error {
	value := defaultValue
	if raw, found := cfgMap[enableAPIFields]; found {
		value = strings.ToLower(raw)
	}
	if value != AlphaAPIFields && value != BetaAPIFields && value != StableAPIFields {
		return fmt.Errorf("invalid value for feature flag %q: %q", enableAPIFields, value)
	}
	*feature = value
	return nil
}
// setCoschedule sets the "coschedule" flag based on the content of a given
// map. The raw value is lower-cased and must be one of the Coschedule*
// constants; anything else is rejected with an error.
func setCoschedule(cfgMap map[string]string, defaultValue string, feature *string) error {
	value := defaultValue
	if raw, found := cfgMap[coscheduleKey]; found {
		value = strings.ToLower(raw)
	}
	switch value {
	case CoscheduleDisabled, CoscheduleWorkspaces, CoschedulePipelineRuns, CoscheduleIsolatePipelineRun:
		// valid — fall through to the assignment below
	default:
		return fmt.Errorf("invalid value for feature flag %q: %q", coscheduleKey, value)
	}
	*feature = value
	return nil
}
// setEnforceNonFalsifiability sets the "enforce-nonfalsifiability" flag based
// on the content of a given map. The raw value is lower-cased and must be
// either EnforceNonfalsifiabilityNone or EnforceNonfalsifiabilityWithSpire;
// anything else yields an error.
func setEnforceNonFalsifiability(cfgMap map[string]string, feature *string) error {
	value := DefaultEnforceNonfalsifiability
	if raw, found := cfgMap[enforceNonfalsifiability]; found {
		value = strings.ToLower(raw)
	}
	if value != EnforceNonfalsifiabilityNone && value != EnforceNonfalsifiabilityWithSpire {
		return fmt.Errorf("invalid value for feature flag %q: %q", enforceNonfalsifiability, value)
	}
	*feature = value
	return nil
}
// setFeatureInlineSpec sets the disable-inline-spec value from the map if the
// key is present (stored verbatim, no validation); otherwise it falls back to
// defaultValue with all spaces stripped. It never returns a non-nil error.
func setFeatureInlineSpec(cfgMap map[string]string, key string, defaultValue string, feature *string) error {
	value, found := cfgMap[key]
	if !found {
		value = strings.ReplaceAll(defaultValue, " ", "")
	}
	*feature = value
	return nil
}
// setResultExtractionMethod sets the "results-from" flag based on the content
// of a given map. The raw value is lower-cased and must be one of
// ResultExtractionMethodTerminationMessage or ResultExtractionMethodSidecarLogs.
func setResultExtractionMethod(cfgMap map[string]string, defaultValue string, feature *string) error {
	value := defaultValue
	if raw, found := cfgMap[resultExtractionMethod]; found {
		value = strings.ToLower(raw)
	}
	if value != ResultExtractionMethodTerminationMessage && value != ResultExtractionMethodSidecarLogs {
		return fmt.Errorf("invalid value for feature flag %q: %q", resultExtractionMethod, value)
	}
	*feature = value
	return nil
}
// setMaxResultSize sets the "max-result-size" flag based on the content of a
// given map. An error is returned when the value is not a valid integer, or
// when it is at or above 1572864 bytes (results are stored in the CRD status,
// which is capped at ~1.5 MB).
func setMaxResultSize(cfgMap map[string]string, defaultValue int, feature *int) error {
	value := defaultValue
	if cfg, ok := cfgMap[maxResultSize]; ok {
		v, err := strconv.Atoi(cfg)
		if err != nil {
			return err
		}
		value = v
	}
	// if max limit is > 1.5 MB (CRD limit).
	if value >= 1572864 {
		// BUG FIX: this error previously named the "results-from" flag
		// (resultExtractionMethod) instead of "max-result-size".
		return fmt.Errorf("invalid value for feature flag %q: %q. This is exceeding the CRD limit", maxResultSize, strconv.Itoa(value))
	}
	*feature = value
	return nil
}
// setVerificationNoMatchPolicy sets the
// "trusted-resources-verification-no-match-policy" flag based on the content
// of a given map. The raw value is lower-cased and must be one of
// FailNoMatchPolicy, WarnNoMatchPolicy or IgnoreNoMatchPolicy.
func setVerificationNoMatchPolicy(cfgMap map[string]string, defaultValue string, feature *string) error {
	value := defaultValue
	if raw, found := cfgMap[verificationNoMatchPolicy]; found {
		value = strings.ToLower(raw)
	}
	if value != FailNoMatchPolicy && value != WarnNoMatchPolicy && value != IgnoreNoMatchPolicy {
		return fmt.Errorf("invalid value for feature flag %q: %q", verificationNoMatchPolicy, value)
	}
	*feature = value
	return nil
}
// PerFeatureFlag records the name, stability level, default state, and
// deprecation status of a single feature flag.
type PerFeatureFlag struct {
	// Name of the feature flag
	Name string
	// Stability level of the feature, one of StableAPIFields, BetaAPIFields or AlphaAPIFields
	Stability string
	// Enabled is whether the feature is turned on
	Enabled bool
	// Deprecated indicates whether the feature is deprecated
	// +optional
	//nolint:gocritic
	Deprecated bool
}
//go:build !disable_tls
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"context"
"fmt"
"knative.dev/pkg/apis"
)
// ValidateEnabledAPIFields checks that the enable-api-fields feature gate is set
// to a version at most as stable as wantVersion, if not, returns an error stating which feature
// is dependent on the version and what the current version actually is.
func ValidateEnabledAPIFields(ctx context.Context, featureName string, wantVersion string) *apis.FieldError {
	currentVersion := FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields
	var errs *apis.FieldError
	message := `%s requires "enable-api-fields" feature gate to be %s but it is %q`
	switch wantVersion {
	case StableAPIFields:
		// If the feature is stable, it doesn't matter what the current version is
	case BetaAPIFields:
		// If the feature requires "beta" fields to be enabled, the current version may be "beta" or "alpha"
		if currentVersion != BetaAPIFields && currentVersion != AlphaAPIFields {
			message = fmt.Sprintf(message, featureName, fmt.Sprintf("%q or %q", AlphaAPIFields, BetaAPIFields), currentVersion)
			errs = apis.ErrGeneric(message)
		}
	case AlphaAPIFields:
		// If the feature requires "alpha" fields to be enabled, the current version must be "alpha"
		if currentVersion != wantVersion {
			message = fmt.Sprintf(message, featureName, fmt.Sprintf("%q", AlphaAPIFields), currentVersion)
			errs = apis.ErrGeneric(message)
		}
	default:
		// BUG FIX: apis.ErrGeneric(diagnostic string, paths ...string) does not
		// format its arguments — the extra values were being treated as field
		// paths and the %s placeholders were emitted verbatim. Format first.
		errs = apis.ErrGeneric(fmt.Sprintf("invalid wantVersion %s, must be one of (%s, %s, %s)", wantVersion, AlphaAPIFields, BetaAPIFields, StableAPIFields))
	}
	return errs
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
corev1 "k8s.io/api/core/v1"
)
const (
	// metricsTaskrunLevelKey determines to what level to aggregate metrics
	// for taskrun
	metricsTaskrunLevelKey = "metrics.taskrun.level"
	// metricsPipelinerunLevelKey determines to what level to aggregate metrics
	// for pipelinerun
	metricsPipelinerunLevelKey = "metrics.pipelinerun.level"
	// metricsRunningPipelinerunLevelKey determines to what level to aggregate metrics
	// for running pipelineruns
	metricsRunningPipelinerunLevelKey = "metrics.running-pipelinerun.level"
	// metricsDurationTaskrunType determines what type of
	// metrics to use for aggregating duration for taskrun
	metricsDurationTaskrunType = "metrics.taskrun.duration-type"
	// metricsDurationPipelinerunType determines what type of
	// metrics to use for aggregating duration for pipelinerun
	metricsDurationPipelinerunType = "metrics.pipelinerun.duration-type"
	// countWithReasonKey sets if the reason label should be included on count metrics
	countWithReasonKey = "metrics.count.enable-reason"
	// throttledWithNamespaceKey sets if the namespace label should be included on the taskrun throttled metrics
	throttledWithNamespaceKey = "metrics.taskrun.throttle.enable-namespace"
	// DefaultTaskrunLevel determines to what level to aggregate metrics
	// when it isn't specified in configmap
	DefaultTaskrunLevel = TaskrunLevelAtTask
	// TaskrunLevelAtTaskrun specifies that aggregation will be done at
	// taskrun level
	TaskrunLevelAtTaskrun = "taskrun"
	// TaskrunLevelAtTask specifies that aggregation will be done at task level
	TaskrunLevelAtTask = "task"
	// TaskrunLevelAtNS specifies that aggregation will be done at namespace level
	TaskrunLevelAtNS = "namespace"
	// DefaultPipelinerunLevel determines to what level to aggregate metrics
	// when it isn't specified in configmap
	DefaultPipelinerunLevel = PipelinerunLevelAtPipeline
	// DefaultRunningPipelinerunLevel determines to what level to aggregate metrics
	// when it isn't specified in configmap. The empty string appears to mean
	// "no extra aggregation level" — confirm against the metrics consumers.
	DefaultRunningPipelinerunLevel = ""
	// PipelinerunLevelAtPipelinerun specifies that aggregation will be done at
	// pipelinerun level
	PipelinerunLevelAtPipelinerun = "pipelinerun"
	// PipelinerunLevelAtPipeline specifies that aggregation will be done at
	// pipeline level
	PipelinerunLevelAtPipeline = "pipeline"
	// PipelinerunLevelAtNS specifies that aggregation will be done at
	// namespace level
	PipelinerunLevelAtNS = "namespace"
	// DefaultDurationTaskrunType determines what type
	// of metrics to use when we don't specify one in
	// configmap
	DefaultDurationTaskrunType = "histogram"
	// DurationTaskrunTypeHistogram specifies that histogram
	// type metrics need to be used for Duration of Taskrun
	DurationTaskrunTypeHistogram = "histogram"
	// DurationTaskrunTypeLastValue specifies that lastValue or
	// gauge type metrics need to be used for Duration of Taskrun
	DurationTaskrunTypeLastValue = "lastvalue"
	// DefaultDurationPipelinerunType determines what type
	// of metrics to use when we don't specify one in
	// configmap
	DefaultDurationPipelinerunType = "histogram"
	// DurationPipelinerunTypeHistogram specifies that histogram
	// type metrics need to be used for Duration of Pipelinerun
	DurationPipelinerunTypeHistogram = "histogram"
	// DurationPipelinerunTypeLastValue specifies that lastValue or
	// gauge type metrics need to be used for Duration of Pipelinerun
	DurationPipelinerunTypeLastValue = "lastvalue"
)
// DefaultMetrics holds all the default configurations for the metrics.
// The error is deliberately discarded: newMetricsFromMap never fails on an
// empty map (it only applies defaults).
var DefaultMetrics, _ = newMetricsFromMap(map[string]string{})
// Metrics holds the configurations for the metrics
// +k8s:deepcopy-gen=true
type Metrics struct {
	// TaskrunLevel is the aggregation level for taskrun metrics
	// (one of the TaskrunLevelAt* constants).
	TaskrunLevel string
	// PipelinerunLevel is the aggregation level for pipelinerun metrics
	// (one of the PipelinerunLevelAt* constants).
	PipelinerunLevel string
	// RunningPipelinerunLevel is the aggregation level for running-pipelinerun metrics.
	RunningPipelinerunLevel string
	// DurationTaskrunType is the metric type used for taskrun durations
	// ("histogram" or "lastvalue").
	DurationTaskrunType string
	// DurationPipelinerunType is the metric type used for pipelinerun durations
	// ("histogram" or "lastvalue").
	DurationPipelinerunType string
	// CountWithReason includes the reason label on count metrics.
	CountWithReason bool
	// ThrottleWithNamespace includes the namespace label on taskrun throttle metrics.
	ThrottleWithNamespace bool
}
// Equals returns true if two Metrics configurations are identical.
// Both receiver and argument may be nil; two nils are considered equal.
func (cfg *Metrics) Equals(other *Metrics) bool {
	if cfg == nil && other == nil {
		return true
	}
	if cfg == nil || other == nil {
		return false
	}
	// BUG FIX: RunningPipelinerunLevel and ThrottleWithNamespace were omitted
	// from the comparison even though newMetricsFromMap populates them, so
	// changes to those settings were not detected as a config difference.
	return other.TaskrunLevel == cfg.TaskrunLevel &&
		other.PipelinerunLevel == cfg.PipelinerunLevel &&
		other.RunningPipelinerunLevel == cfg.RunningPipelinerunLevel &&
		other.DurationTaskrunType == cfg.DurationTaskrunType &&
		other.DurationPipelinerunType == cfg.DurationPipelinerunType &&
		other.CountWithReason == cfg.CountWithReason &&
		other.ThrottleWithNamespace == cfg.ThrottleWithNamespace
}
// newMetricsFromMap returns a Metrics config given a map corresponding to a
// ConfigMap; any key absent from the map keeps its documented default. The
// error result is always nil (kept for interface symmetry with other config
// constructors).
func newMetricsFromMap(cfgMap map[string]string) (*Metrics, error) {
	m := Metrics{
		TaskrunLevel:            DefaultTaskrunLevel,
		PipelinerunLevel:        DefaultPipelinerunLevel,
		RunningPipelinerunLevel: DefaultRunningPipelinerunLevel,
		DurationTaskrunType:     DefaultDurationTaskrunType,
		DurationPipelinerunType: DefaultDurationPipelinerunType,
		// CountWithReason and ThrottleWithNamespace default to false via the
		// zero value.
	}
	// override copies the map value into dst when the key is present.
	override := func(key string, dst *string) {
		if v, ok := cfgMap[key]; ok {
			*dst = v
		}
	}
	override(metricsTaskrunLevelKey, &m.TaskrunLevel)
	override(metricsPipelinerunLevelKey, &m.PipelinerunLevel)
	override(metricsRunningPipelinerunLevelKey, &m.RunningPipelinerunLevel)
	override(metricsDurationTaskrunType, &m.DurationTaskrunType)
	override(metricsDurationPipelinerunType, &m.DurationPipelinerunType)
	// These two flags are on when the key is present with any value other
	// than the exact string "false".
	if v, ok := cfgMap[countWithReasonKey]; ok && v != "false" {
		m.CountWithReason = true
	}
	if v, ok := cfgMap[throttledWithNamespaceKey]; ok && v != "false" {
		m.ThrottleWithNamespace = true
	}
	return &m, nil
}
// NewMetricsFromConfigMap returns a Metrics config for the given ConfigMap's data.
func NewMetricsFromConfigMap(config *corev1.ConfigMap) (*Metrics, error) {
	return newMetricsFromMap(config.Data)
}
//go:build !disable_tls
package config
import (
"context"
corev1 "k8s.io/api/core/v1"
"knative.dev/pkg/metrics"
)
// GetMetricsConfigName returns the name of the configmap holding the
// observability/metrics configuration, as defined by the knative metrics
// package. (The previous comment's mention of a "storage bucket" was a
// copy-paste error.)
func GetMetricsConfigName() string {
	return metrics.ConfigMapName()
}
// NewFeatureFlagsFromConfigMap returns a FeatureFlags for the given ConfigMap's data.
func NewFeatureFlagsFromConfigMap(config *corev1.ConfigMap) (*FeatureFlags, error) {
	return NewFeatureFlagsFromMap(config.Data)
}
// GetVerificationNoMatchPolicy returns the
// "trusted-resources-verification-no-match-policy" value ("ignore", "warn" or
// "fail") from the config attached to ctx, falling back to the defaults.
func GetVerificationNoMatchPolicy(ctx context.Context) string {
	return FromContextOrDefaults(ctx).FeatureFlags.VerificationNoMatchPolicy
}
// IsSpireEnabled checks if non-falsifiable provenance is enforced through
// SPIRE, i.e. "enforce-nonfalsifiability" is set to "spire" in the config
// attached to ctx (or in the defaults).
func IsSpireEnabled(ctx context.Context) bool {
	return FromContextOrDefaults(ctx).FeatureFlags.EnforceNonfalsifiability == EnforceNonfalsifiabilityWithSpire
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolver
import (
"fmt"
"os"
"strconv"
corev1 "k8s.io/api/core/v1"
)
const (
	// All built-in remote resolvers are enabled by default.

	// DefaultEnableGitResolver is the default value for "enable-git-resolver".
	DefaultEnableGitResolver = true
	// DefaultEnableHubResolver is the default value for "enable-hub-resolver".
	DefaultEnableHubResolver = true
	// DefaultEnableBundlesResolver is the default value for "enable-bundles-resolver".
	DefaultEnableBundlesResolver = true
	// DefaultEnableClusterResolver is the default value for "enable-cluster-resolver".
	DefaultEnableClusterResolver = true
	// DefaultEnableHttpResolver is the default value for "enable-http-resolver".
	DefaultEnableHttpResolver = true
	// EnableGitResolver is the flag used to enable the git remote resolver
	EnableGitResolver = "enable-git-resolver"
	// EnableHubResolver is the flag used to enable the hub remote resolver
	EnableHubResolver = "enable-hub-resolver"
	// EnableBundlesResolver is the flag used to enable the bundle remote resolver
	EnableBundlesResolver = "enable-bundles-resolver"
	// EnableClusterResolver is the flag used to enable the cluster remote resolver
	EnableClusterResolver = "enable-cluster-resolver"
	// EnableHttpResolver is the flag used to enable the http remote resolver
	EnableHttpResolver = "enable-http-resolver"
)
// FeatureFlags holds the features configurations
// +k8s:deepcopy-gen=true
type FeatureFlags struct {
	// EnableGitResolver mirrors the "enable-git-resolver" flag.
	EnableGitResolver bool
	// EnableHubResolver mirrors the "enable-hub-resolver" flag.
	EnableHubResolver bool
	// EnableBundleResolver mirrors the "enable-bundles-resolver" flag.
	// NOTE(review): field name is singular while the flag and its default
	// constant are plural; kept as-is for backward compatibility.
	EnableBundleResolver bool
	// EnableClusterResolver mirrors the "enable-cluster-resolver" flag.
	EnableClusterResolver bool
	// EnableHttpResolver mirrors the "enable-http-resolver" flag.
	EnableHttpResolver bool
}
// GetFeatureFlagsConfigName returns the name of the configmap containing all
// feature flags. The CONFIG_RESOLVERS_FEATURE_FLAGS_NAME environment variable
// overrides the built-in default of "resolvers-feature-flags".
func GetFeatureFlagsConfigName() string {
	name := os.Getenv("CONFIG_RESOLVERS_FEATURE_FLAGS_NAME")
	if name == "" {
		name = "resolvers-feature-flags"
	}
	return name
}
// NewFeatureFlagsFromMap returns a FeatureFlags given a map corresponding to
// the resolvers feature-flags ConfigMap. Keys absent from cfgMap fall back to
// their defaults; a value that strconv.ParseBool rejects yields an error.
func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) {
	ff := FeatureFlags{}
	// One row per resolver flag: ConfigMap key, default, destination field.
	flags := []struct {
		key      string
		fallback bool
		dst      *bool
	}{
		{EnableGitResolver, DefaultEnableGitResolver, &ff.EnableGitResolver},
		{EnableHubResolver, DefaultEnableHubResolver, &ff.EnableHubResolver},
		{EnableBundlesResolver, DefaultEnableBundlesResolver, &ff.EnableBundleResolver},
		{EnableClusterResolver, DefaultEnableClusterResolver, &ff.EnableClusterResolver},
		{EnableHttpResolver, DefaultEnableHttpResolver, &ff.EnableHttpResolver},
	}
	for _, f := range flags {
		raw, found := cfgMap[f.key]
		if !found {
			*f.dst = f.fallback
			continue
		}
		parsed, err := strconv.ParseBool(raw)
		if err != nil {
			return nil, fmt.Errorf("failed parsing feature flags config %q: %w", raw, err)
		}
		*f.dst = parsed
	}
	return &ff, nil
}
// NewFeatureFlagsFromConfigMap returns a FeatureFlags for the given ConfigMap's data.
func NewFeatureFlagsFromConfigMap(config *corev1.ConfigMap) (*FeatureFlags, error) {
	return NewFeatureFlagsFromMap(config.Data)
}
//go:build !disable_tls
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolver
import (
"context"
"knative.dev/pkg/configmap"
)
// cfgKey is the private, zero-size context key type under which a *Config is
// stored; a package-private type prevents key collisions with other packages.
type cfgKey struct{}
// Config holds the collection of configurations that we attach to contexts.
// +k8s:deepcopy-gen=false
type Config struct {
	// FeatureFlags is the resolver feature-flag configuration.
	FeatureFlags *FeatureFlags
}
// ResolversNamespace derives the resolvers namespace from the pipelines
// namespace by appending the fixed "-resolvers" suffix.
func ResolversNamespace(baseNS string) string {
	const suffix = "-resolvers"
	return baseNS + suffix
}
// FromContext extracts a Config from the provided context, or returns nil
// when no Config has been attached.
func FromContext(ctx context.Context) *Config {
	if cfg, ok := ctx.Value(cfgKey{}).(*Config); ok {
		return cfg
	}
	return nil
}
// FromContextOrDefaults is like FromContext, but when no Config is attached it
// returns a Config populated with the defaults for each of the Config fields.
func FromContextOrDefaults(ctx context.Context) *Config {
	if attached := FromContext(ctx); attached != nil {
		return attached
	}
	// An empty map always yields the defaults; the error is discarded.
	defaults, _ := NewFeatureFlagsFromMap(map[string]string{})
	return &Config{FeatureFlags: defaults}
}
// ToContext attaches the provided Config to the provided context, returning the
// new context with the Config attached. Retrieve it with FromContext.
func ToContext(ctx context.Context, c *Config) context.Context {
	return context.WithValue(ctx, cfgKey{}, c)
}
// Store is a typed wrapper around configmap.Untyped store to handle our configmaps.
// It embeds the untyped store and exposes typed Load/ToContext helpers.
// +k8s:deepcopy-gen=false
type Store struct {
	*configmap.UntypedStore
}
// NewStore creates a new store of Configs and optionally calls functions when
// ConfigMaps are updated.
func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store {
	// The single watched configmap is the resolver feature-flags map, parsed
	// by NewFeatureFlagsFromConfigMap.
	untyped := configmap.NewUntypedStore(
		"features",
		logger,
		configmap.Constructors{
			GetFeatureFlagsConfigName(): NewFeatureFlagsFromConfigMap,
		},
		onAfterStore...,
	)
	return &Store{UntypedStore: untyped}
}
// ToContext attaches the current Config state to the provided context.
func (s *Store) ToContext(ctx context.Context) context.Context {
	return ToContext(ctx, s.Load())
}
// Load creates a Config from the current config state of the Store, deep
// copying the feature flags so callers cannot mutate the store's state. When
// the configmap has not been loaded yet, the defaults are used.
func (s *Store) Load() *Config {
	var flags *FeatureFlags
	if untyped := s.UntypedLoad(GetFeatureFlagsConfigName()); untyped != nil {
		flags = untyped.(*FeatureFlags)
	} else {
		flags, _ = NewFeatureFlagsFromMap(map[string]string{})
	}
	return &Config{FeatureFlags: flags.DeepCopy()}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package resolver
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureFlags) DeepCopyInto(out *FeatureFlags) {
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureFlags.
func (in *FeatureFlags) DeepCopy() *FeatureFlags {
	if in == nil {
		return nil
	}
	out := new(FeatureFlags)
	in.DeepCopyInto(out)
	return out
}
//go:build !disable_tls
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
sc "github.com/tektoncd/pipeline/pkg/spire/config"
corev1 "k8s.io/api/core/v1"
)
const (
	// SpireConfigMapName is the name of the SPIRE configmap
	// (the previous "trusted resources" wording was a copy-paste error).
	SpireConfigMapName = "config-spire"
	// SpireTrustDomain is the key to extract out the SPIRE trust domain to use
	SpireTrustDomain = "spire-trust-domain"
	// SpireSocketPath is the key to extract out the SPIRE agent socket for SPIFFE workload API
	SpireSocketPath = "spire-socket-path"
	// SpireServerAddr is the key to extract out the SPIRE server address for workload/node registration
	SpireServerAddr = "spire-server-addr"
	// SpireNodeAliasPrefix is the key to extract out the SPIRE node alias prefix to use
	SpireNodeAliasPrefix = "spire-node-alias-prefix"
	// SpireTrustDomainDefault is the default value for the SpireTrustDomain
	SpireTrustDomainDefault = "example.org"
	// SpireSocketPathDefault is the default value for the SpireSocketPath
	SpireSocketPathDefault = "unix:///spiffe-workload-api/spire-agent.sock"
	// SpireServerAddrDefault is the default value for the SpireServerAddr
	SpireServerAddrDefault = "spire-server.spire.svc.cluster.local:8081"
	// SpireNodeAliasPrefixDefault is the default value for the SpireNodeAliasPrefix
	SpireNodeAliasPrefixDefault = "/tekton-node/"
)
// DefaultSpire hols all the default configurations for the spire.
var DefaultSpire, _ = NewSpireConfigFromMap(map[string]string{})
// NewSpireConfigFromMap creates a Config from the supplied map.
// Every missing key falls back to the package default; a key that is
// present with an empty value is kept as-is. The assembled config is
// validated before being returned.
func NewSpireConfigFromMap(data map[string]string) (*sc.SpireConfig, error) {
	// lookup returns the configured value for key, or fallback when the key
	// is absent from the map.
	lookup := func(key, fallback string) string {
		if v, ok := data[key]; ok {
			return v
		}
		return fallback
	}

	cfg := &sc.SpireConfig{
		TrustDomain:     lookup(SpireTrustDomain, SpireTrustDomainDefault),
		SocketPath:      lookup(SpireSocketPath, SpireSocketPathDefault),
		ServerAddr:      lookup(SpireServerAddr, SpireServerAddrDefault),
		NodeAliasPrefix: lookup(SpireNodeAliasPrefix, SpireNodeAliasPrefixDefault),
	}

	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("failed to parse SPIRE configmap: %w", err)
	}
	return cfg, nil
}
// NewSpireConfigFromConfigMap creates a Config from the supplied ConfigMap.
// Only the ConfigMap's Data field is consulted; see NewSpireConfigFromMap
// for key handling and defaulting.
func NewSpireConfigFromConfigMap(configMap *corev1.ConfigMap) (*sc.SpireConfig, error) {
	return NewSpireConfigFromMap(configMap.Data)
}
// GetSpireConfigName returns the name of Spire ConfigMap.
// The CONFIG_SPIRE environment variable, when non-empty, overrides the
// compiled-in default name.
func GetSpireConfigName() string {
	name := os.Getenv("CONFIG_SPIRE")
	if name == "" {
		name = SpireConfigMapName
	}
	return name
}
//go:build !disable_tls
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"context"
sc "github.com/tektoncd/pipeline/pkg/spire/config"
"knative.dev/pkg/configmap"
)
// cfgKey is the unexported, zero-size context key under which a *Config is
// stored; a private struct type cannot collide with keys from other packages.
type cfgKey struct{}

// Config holds the collection of configurations that we attach to contexts.
// +k8s:deepcopy-gen=false
type Config struct {
	Defaults               *Defaults
	FeatureFlags           *FeatureFlags
	Metrics                *Metrics
	SpireConfig            *sc.SpireConfig
	Events                 *Events
	Tracing                *Tracing
	WaitExponentialBackoff *WaitExponentialBackoff
}
// FromContext extracts a Config from the provided context.
// It returns nil when no Config has been attached via ToContext.
func FromContext(ctx context.Context) *Config {
	if cfg, ok := ctx.Value(cfgKey{}).(*Config); ok {
		return cfg
	}
	return nil
}
// FromContextOrDefaults is like FromContext, but when no Config is attached it
// returns a Config populated with the defaults for each of the Config fields.
// Each default is deep-copied so callers can mutate the result safely.
func FromContextOrDefaults(ctx context.Context) *Config {
	cfg := FromContext(ctx)
	if cfg == nil {
		cfg = &Config{
			Defaults:               DefaultConfig.DeepCopy(),
			FeatureFlags:           DefaultFeatureFlags.DeepCopy(),
			Metrics:                DefaultMetrics.DeepCopy(),
			SpireConfig:            DefaultSpire.DeepCopy(),
			Events:                 DefaultEvents.DeepCopy(),
			Tracing:                DefaultTracing.DeepCopy(),
			WaitExponentialBackoff: DefaultWaitExponentialBackoff.DeepCopy(),
		}
	}
	return cfg
}
// ToContext attaches the provided Config to the provided context, returning the
// new context with the Config attached. The Config can be retrieved again with
// FromContext or FromContextOrDefaults.
func ToContext(ctx context.Context, c *Config) context.Context {
	return context.WithValue(ctx, cfgKey{}, c)
}
// Store is a typed wrapper around configmap.Untyped store to handle our configmaps.
// +k8s:deepcopy-gen=false
type Store struct {
	// UntypedStore watches the individual configmaps and caches their most
	// recently parsed values; Load converts that state into a typed Config.
	*configmap.UntypedStore
}
// NewStore creates a new store of Configs and optionally calls functions when ConfigMaps are updated.
// The store watches every configmap this package knows about (defaults,
// feature flags, metrics, spire, events, tracing, wait-exponential-backoff),
// parsing each update with the matching constructor.
func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store {
	constructors := configmap.Constructors{
		GetDefaultsConfigName():               NewDefaultsFromConfigMap,
		GetFeatureFlagsConfigName():           NewFeatureFlagsFromConfigMap,
		GetMetricsConfigName():                NewMetricsFromConfigMap,
		GetSpireConfigName():                  NewSpireConfigFromConfigMap,
		GetEventsConfigName():                 NewEventsFromConfigMap,
		GetTracingConfigName():                NewTracingFromConfigMap,
		GetWaitExponentialBackoffConfigName(): NewWaitExponentialBackoffFromConfigMap,
	}
	return &Store{
		UntypedStore: configmap.NewUntypedStore("defaults/features/artifacts", logger, constructors, onAfterStore...),
	}
}
// ToContext attaches the current Config state to the provided context.
// The state is snapshotted via Load (which deep-copies every field), so
// later configmap updates do not mutate the attached Config.
func (s *Store) ToContext(ctx context.Context) context.Context {
	return ToContext(ctx, s.Load())
}
// Load creates a Config from the current config state of the Store.
// For each underlying configmap the most recently observed value is used,
// falling back to the package-level default when the configmap has not been
// seen yet. Every field is deep-copied so callers can mutate the result
// without affecting the Store.
func (s *Store) Load() *Config {
	defaultsCfg := s.UntypedLoad(GetDefaultsConfigName())
	if defaultsCfg == nil {
		defaultsCfg = DefaultConfig.DeepCopy()
	}

	featureFlagsCfg := s.UntypedLoad(GetFeatureFlagsConfigName())
	if featureFlagsCfg == nil {
		featureFlagsCfg = DefaultFeatureFlags.DeepCopy()
	}

	metricsCfg := s.UntypedLoad(GetMetricsConfigName())
	if metricsCfg == nil {
		metricsCfg = DefaultMetrics.DeepCopy()
	}

	tracingCfg := s.UntypedLoad(GetTracingConfigName())
	if tracingCfg == nil {
		tracingCfg = DefaultTracing.DeepCopy()
	}

	spireCfg := s.UntypedLoad(GetSpireConfigName())
	if spireCfg == nil {
		spireCfg = DefaultSpire.DeepCopy()
	}

	eventsCfg := s.UntypedLoad(GetEventsConfigName())
	if eventsCfg == nil {
		eventsCfg = DefaultEvents.DeepCopy()
	}

	backoffCfg := s.UntypedLoad(GetWaitExponentialBackoffConfigName())
	if backoffCfg == nil {
		backoffCfg = DefaultWaitExponentialBackoff.DeepCopy()
	}

	return &Config{
		Defaults:               defaultsCfg.(*Defaults).DeepCopy(),
		FeatureFlags:           featureFlagsCfg.(*FeatureFlags).DeepCopy(),
		Metrics:                metricsCfg.(*Metrics).DeepCopy(),
		Tracing:                tracingCfg.(*Tracing).DeepCopy(),
		SpireConfig:            spireCfg.(*sc.SpireConfig).DeepCopy(),
		Events:                 eventsCfg.(*Events).DeepCopy(),
		WaitExponentialBackoff: backoffCfg.(*WaitExponentialBackoff).DeepCopy(),
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"testing"
"github.com/tektoncd/pipeline/pkg/apis/config"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
logtesting "knative.dev/pkg/logging/testing"
)
// SetDefaults sets the default ConfigMap values in an existing context (for use in testing).
// The supplied data is fed to a fresh config.Store as the contents of the
// defaults configmap, and the store's resulting state is attached to ctx.
func SetDefaults(ctx context.Context, t *testing.T, data map[string]string) context.Context {
	t.Helper()
	store := config.NewStore(logtesting.TestLogger(t))
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: config.GetDefaultsConfigName()},
		Data:       data,
	}
	store.OnConfigChanged(cm)
	return store.ToContext(ctx)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"testing"
"github.com/tektoncd/pipeline/pkg/apis/config"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/logging"
)
// SetFeatureFlags sets the feature-flags ConfigMap values in an existing context (for use in testing).
// The supplied data is fed to a fresh config.Store as the contents of the
// feature-flags configmap, and the store's resulting state is attached to ctx.
func SetFeatureFlags(ctx context.Context, t *testing.T, data map[string]string) context.Context {
	t.Helper()
	store := config.NewStore(logging.FromContext(ctx).Named("config-store"))
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()},
		Data:       data,
	}
	store.OnConfigChanged(cm)
	return store.ToContext(ctx)
}
// EnableAlphaAPIFields enables alpha features in an existing context (for use in testing)
// by setting the enable-api-fields flag to config.AlphaAPIFields.
func EnableAlphaAPIFields(ctx context.Context) context.Context {
	return setEnableAPIFields(ctx, config.AlphaAPIFields)
}

// EnableBetaAPIFields enables beta features in an existing context (for use in testing)
// by setting the enable-api-fields flag to config.BetaAPIFields.
func EnableBetaAPIFields(ctx context.Context) context.Context {
	return setEnableAPIFields(ctx, config.BetaAPIFields)
}

// EnableStableAPIFields enables stable features in an existing context (for use in testing)
// by setting the enable-api-fields flag to config.StableAPIFields.
func EnableStableAPIFields(ctx context.Context) context.Context {
	return setEnableAPIFields(ctx, config.StableAPIFields)
}
// setEnableAPIFields returns a context carrying a Config whose
// "enable-api-fields" feature flag is set to want, alongside a minimal
// Defaults (60-minute default timeout). The error from
// NewFeatureFlagsFromMap is deliberately discarded, as in the original:
// callers pass the package's API-field constants.
func setEnableAPIFields(ctx context.Context, want string) context.Context {
	flags, _ := config.NewFeatureFlagsFromMap(map[string]string{
		"enable-api-fields": want,
	})
	return config.ToContext(ctx, &config.Config{
		Defaults:     &config.Defaults{DefaultTimeoutMinutes: 60},
		FeatureFlags: flags,
	})
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"strconv"
corev1 "k8s.io/api/core/v1"
)
const (
	// tracingEnabledKey is the configmap key which determines if tracing is enabled
	tracingEnabledKey = "enabled"
	// tracingEndpointKey is the configmap key for the tracing api endpoint
	tracingEndpointKey = "endpoint"
	// tracingCredentialsSecretKey is the name of the secret which contains credentials for the tracing endpoint
	tracingCredentialsSecretKey = "credentialsSecret"
	// DefaultEndpoint is the default destination for sending traces
	DefaultEndpoint = "http://jaeger-collector.jaeger.svc.cluster.local:14268/api/traces"
)

// DefaultTracing holds all the default configurations for tracing.
var DefaultTracing, _ = newTracingFromMap(map[string]string{})

// Tracing holds the configurations for tracing
// +k8s:deepcopy-gen=true
type Tracing struct {
	// Enabled is the parsed value of the "enabled" configmap key.
	Enabled bool
	// Endpoint is the value of the "endpoint" configmap key.
	Endpoint string
	// CredentialsSecret is the value of the "credentialsSecret" configmap key.
	CredentialsSecret string
}

// Equals returns true if two Configs are identical.
// Nil receivers are supported: two nil configs compare equal.
func (cfg *Tracing) Equals(other *Tracing) bool {
	switch {
	case cfg == nil && other == nil:
		return true
	case cfg == nil || other == nil:
		return false
	default:
		return cfg.Enabled == other.Enabled &&
			cfg.Endpoint == other.Endpoint &&
			cfg.CredentialsSecret == other.CredentialsSecret
	}
}

// GetTracingConfigName returns the name of the configmap containing all
// customizations for tracing. The CONFIG_TRACING_NAME environment variable,
// when non-empty, overrides the built-in default.
func GetTracingConfigName() string {
	name := os.Getenv("CONFIG_TRACING_NAME")
	if name == "" {
		name = "config-tracing"
	}
	return name
}

// newTracingFromMap returns a Config given a map from ConfigMap.
// Absent keys keep their defaults (disabled, DefaultEndpoint); a malformed
// "enabled" value yields an error.
func newTracingFromMap(config map[string]string) (*Tracing, error) {
	tracing := &Tracing{
		Enabled:  false,
		Endpoint: DefaultEndpoint,
	}
	if v, ok := config[tracingEndpointKey]; ok {
		tracing.Endpoint = v
	}
	if v, ok := config[tracingCredentialsSecretKey]; ok {
		tracing.CredentialsSecret = v
	}
	if v, ok := config[tracingEnabledKey]; ok {
		parsed, err := strconv.ParseBool(v)
		if err != nil {
			return nil, fmt.Errorf("failed parsing tracing config %q: %w", v, err)
		}
		tracing.Enabled = parsed
	}
	return tracing, nil
}
// NewTracingFromConfigMap returns a Config given a ConfigMap.
// Only the ConfigMap's Data field is consulted; see newTracingFromMap for
// key handling and defaulting.
func NewTracingFromConfigMap(config *corev1.ConfigMap) (*Tracing, error) {
	return newTracingFromMap(config.Data)
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"strconv"
"time"
corev1 "k8s.io/api/core/v1"
)
const (
	// Configmap keys for the wait-exponential-backoff settings.
	waitExponentialBackoffDurationKey = "duration"
	waitExponentialBackoffFactorKey   = "factor"
	waitExponentialBackoffJitterKey   = "jitter"
	waitExponentialBackoffStepsKey    = "steps"
	waitExponentialBackoffCapKey      = "cap"

	// Defaults applied when the corresponding key is absent.
	DefaultWaitExponentialBackoffDuration = "1s"
	DefaultWaitExponentialBackoffFactor   = 2.0
	DefaultWaitExponentialBackoffJitter   = 0.0
	DefaultWaitExponentialBackoffSteps    = 10
	DefaultWaitExponentialBackoffCap      = "60s"
)

// DefaultWaitExponentialBackoff holds all the default configurations for wait-exponential-backoff.
var DefaultWaitExponentialBackoff, _ = newWaitExponentialBackoffFromMap(map[string]string{})

// WaitExponentialBackoff holds the configurations for exponential backoff
// +k8s:deepcopy-gen=true
type WaitExponentialBackoff struct {
	Duration time.Duration // parsed from the "duration" key
	Factor   float64       // parsed from the "factor" key
	Jitter   float64       // parsed from the "jitter" key
	Steps    int           // parsed from the "steps" key
	Cap      time.Duration // parsed from the "cap" key
}

// Equals returns true if two Configs are identical.
// Nil receivers are supported: two nil configs compare equal.
func (cfg *WaitExponentialBackoff) Equals(other *WaitExponentialBackoff) bool {
	switch {
	case cfg == nil && other == nil:
		return true
	case cfg == nil || other == nil:
		return false
	default:
		return cfg.Duration == other.Duration &&
			cfg.Factor == other.Factor &&
			cfg.Jitter == other.Jitter &&
			cfg.Steps == other.Steps &&
			cfg.Cap == other.Cap
	}
}

// GetWaitExponentialBackoffConfigName returns the name of the configmap
// containing all customizations for wait-exponential-backoff. The
// CONFIG_WAIT_EXPONENTIAL_BACKOFF_NAME environment variable, when non-empty,
// overrides the built-in default.
func GetWaitExponentialBackoffConfigName() string {
	name := os.Getenv("CONFIG_WAIT_EXPONENTIAL_BACKOFF_NAME")
	if name == "" {
		name = "config-wait-exponential-backoff"
	}
	return name
}

// newWaitExponentialBackoffFromMap returns a Config given a map from ConfigMap.
// Absent keys fall back to the package defaults; malformed values yield an
// error (checked in the order duration, factor, jitter, steps, cap).
func newWaitExponentialBackoffFromMap(config map[string]string) (*WaitExponentialBackoff, error) {
	// stringValue returns the configured value for key, or fallback when absent.
	stringValue := func(key, fallback string) string {
		if v, ok := config[key]; ok {
			return v
		}
		return fallback
	}

	out := &WaitExponentialBackoff{
		Factor: DefaultWaitExponentialBackoffFactor,
		Jitter: DefaultWaitExponentialBackoffJitter,
		Steps:  DefaultWaitExponentialBackoffSteps,
	}

	durationStr := stringValue(waitExponentialBackoffDurationKey, DefaultWaitExponentialBackoffDuration)
	duration, err := time.ParseDuration(durationStr)
	if err != nil {
		return nil, fmt.Errorf("failed parsing duration %q: %w", durationStr, err)
	}
	out.Duration = duration

	if v, ok := config[waitExponentialBackoffFactorKey]; ok {
		factor, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return nil, fmt.Errorf("failed parsing factor %q: %w", v, err)
		}
		out.Factor = factor
	}

	if v, ok := config[waitExponentialBackoffJitterKey]; ok {
		jitter, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return nil, fmt.Errorf("failed parsing jitter %q: %w", v, err)
		}
		out.Jitter = jitter
	}

	if v, ok := config[waitExponentialBackoffStepsKey]; ok {
		steps, err := strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("failed parsing steps %q: %w", v, err)
		}
		out.Steps = steps
	}

	capStr := stringValue(waitExponentialBackoffCapKey, DefaultWaitExponentialBackoffCap)
	capDuration, err := time.ParseDuration(capStr)
	if err != nil {
		return nil, fmt.Errorf("failed parsing cap %q: %w", capStr, err)
	}
	out.Cap = capDuration

	return out, nil
}
// NewWaitExponentialBackoffFromConfigMap returns a Config given a ConfigMap.
// Only the ConfigMap's Data field is consulted; see
// newWaitExponentialBackoffFromMap for key handling and defaulting.
func NewWaitExponentialBackoffFromConfigMap(config *corev1.ConfigMap) (*WaitExponentialBackoff, error) {
	return newWaitExponentialBackoffFromMap(config.Data)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
import (
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
v1 "k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Defaults) DeepCopyInto(out *Defaults) {
	*out = *in
	// Pointer- and reference-typed fields are re-allocated below so the copy
	// shares no mutable state with the receiver.
	if in.DefaultPodTemplate != nil {
		in, out := &in.DefaultPodTemplate, &out.DefaultPodTemplate
		*out = new(pod.Template)
		(*in).DeepCopyInto(*out)
	}
	if in.DefaultAAPodTemplate != nil {
		in, out := &in.DefaultAAPodTemplate, &out.DefaultAAPodTemplate
		*out = new(pod.AffinityAssistantTemplate)
		(*in).DeepCopyInto(*out)
	}
	if in.DefaultForbiddenEnv != nil {
		in, out := &in.DefaultForbiddenEnv, &out.DefaultForbiddenEnv
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.DefaultContainerResourceRequirements != nil {
		in, out := &in.DefaultContainerResourceRequirements, &out.DefaultContainerResourceRequirements
		*out = make(map[string]v1.ResourceRequirements, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Defaults.
// It returns nil for a nil receiver so callers may chain it safely.
func (in *Defaults) DeepCopy() *Defaults {
	if in == nil {
		return nil
	}
	out := new(Defaults)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Events) DeepCopyInto(out *Events) {
	*out = *in
	// The Formats map is re-allocated so the copy does not alias the
	// receiver's map.
	if in.Formats != nil {
		in, out := &in.Formats, &out.Formats
		*out = make(EventFormats, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Events.
func (in *Events) DeepCopy() *Events {
	if in == nil {
		return nil
	}
	out := new(Events)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureFlags) DeepCopyInto(out *FeatureFlags) {
	// Plain value copy: deepcopy-gen emits this form when it determines the
	// struct has no reference-typed fields to copy.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureFlags.
func (in *FeatureFlags) DeepCopy() *FeatureFlags {
	if in == nil {
		return nil
	}
	out := new(FeatureFlags)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Metrics) DeepCopyInto(out *Metrics) {
	// Plain value copy: deepcopy-gen emits this form when it determines the
	// struct has no reference-typed fields to copy.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics.
func (in *Metrics) DeepCopy() *Metrics {
	if in == nil {
		return nil
	}
	out := new(Metrics)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Tracing) DeepCopyInto(out *Tracing) {
	// Tracing contains only bool/string fields, so a value copy is a deep copy.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tracing.
func (in *Tracing) DeepCopy() *Tracing {
	if in == nil {
		return nil
	}
	out := new(Tracing)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WaitExponentialBackoff) DeepCopyInto(out *WaitExponentialBackoff) {
	// WaitExponentialBackoff contains only scalar fields, so a value copy is
	// a deep copy.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WaitExponentialBackoff.
func (in *WaitExponentialBackoff) DeepCopy() *WaitExponentialBackoff {
	if in == nil {
		return nil
	}
	out := new(WaitExponentialBackoff)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
"errors"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// UserErrorLabel is the prefix prepended to messages that were caused by a
// user (rather than system) error.
const UserErrorLabel = "[User error] "

// UserError annotates an error with a Reason prefix marking the user, not
// the system, as responsible for the failure.
type UserError struct {
	// Reason is the label prepended to the message (e.g. UserErrorLabel).
	Reason string
	// Original is the underlying error being annotated.
	Original error
}

// Compile-time check that *UserError satisfies the error interface.
var _ error = &UserError{}

// Error returns the original error message. This implements the error.Error interface.
func (e *UserError) Error() string {
	return e.Original.Error()
}

// Unwrap returns the original error without the Reason annotation. This is
// intended to support usage of errors.Is and errors.As with Errors.
func (e *UserError) Unwrap() error {
	return e.Original
}

// newUserError returns a UserError with the given reason and underlying
// original error.
func newUserError(reason string, err error) *UserError {
	return &UserError{Reason: reason, Original: err}
}

// WrapUserError wraps the original error with the user error label.
func WrapUserError(err error) error {
	return newUserError(UserErrorLabel, err)
}

// LabelUserError labels the failure RunStatus message if any of its error messages has been
// wrapped as an UserError. It indicates that the user is responsible for an error.
// See github.com/tektoncd/pipeline/blob/main/docs/pipelineruns.md#marking-off-user-errors
// for more details.
func LabelUserError(messageFormat string, messageA []interface{}) string {
	for _, msg := range messageA {
		userErr, isUserErr := msg.(*UserError)
		if isUserErr {
			// Only the first UserError's reason is applied.
			return userErr.Reason + messageFormat
		}
	}
	return messageFormat
}

// GetErrorMessage returns the error message with the user error label if it
// is of type user error.
func GetErrorMessage(err error) string {
	var userErr *UserError
	if errors.As(err, &userErr) {
		return userErr.Reason + err.Error()
	}
	return err.Error()
}
// IsImmutableTaskRunSpecError returns true if the error is the taskrun spec is immutable.
// It matches the BadRequest the apiserver returns when an update to a
// completed TaskRun's spec is rejected, identified by the "no updates are
// allowed" fragment of its message.
func IsImmutableTaskRunSpecError(err error) bool {
	// The TaskRun may have completed and the spec field is immutable.
	// validation code: https://github.com/tektoncd/pipeline/blob/v0.62.0/pkg/apis/pipeline/v1/taskrun_validation.go#L136-L138
	return apierrors.IsBadRequest(err) && strings.Contains(err.Error(), "no updates are allowed")
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipeline
import (
"fmt"
"sort"
)
// Images holds the images reference for a number of container images used
// across tektoncd pipelines.
type Images struct {
	// EntrypointImage is container image containing our entrypoint binary.
	EntrypointImage string
	// SidecarLogResultsImage is container image containing the binary that fetches results from the steps and logs it to stdout.
	SidecarLogResultsImage string
	// NopImage is the container image used to kill sidecars.
	NopImage string
	// ShellImage is the container image containing bash shell.
	ShellImage string
	// ShellImageWin is the container image containing powershell.
	ShellImageWin string
	// WorkingDirInitImage is the container image containing our working dir init binary.
	WorkingDirInitImage string

	// NOTE: Make sure to add any new images to Validate below!
}

// Validate returns an error if any image is not set.
// The error lists every missing flag name in sorted order.
func (i Images) Validate() error {
	// Pair each image value with the CLI flag expected to have set it.
	checks := []struct {
		value string
		flag  string
	}{
		{i.EntrypointImage, "entrypoint-image"},
		{i.SidecarLogResultsImage, "sidecarlogresults-image"},
		{i.NopImage, "nop-image"},
		{i.ShellImage, "shell-image"},
		{i.ShellImageWin, "shell-image-win"},
		{i.WorkingDirInitImage, "workingdirinit-image"},
	}

	var missing []string
	for _, c := range checks {
		if c.value == "" {
			missing = append(missing, c.flag)
		}
	}
	if len(missing) == 0 {
		return nil
	}
	sort.Strings(missing)
	return fmt.Errorf("found unset image flags: %s", missing)
}
package checksum
import (
"crypto/sha256"
"encoding/json"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
	// SignatureAnnotation is the key of signature in annotation map.
	// PrepareObjectMeta strips this key so the payload used for checksum /
	// verification excludes the signature itself.
	SignatureAnnotation = "tekton.dev/signature"
)
// PrepareObjectMeta returns a copy of the object's metadata containing only
// user-configured fields. System-populated fields (UID, resourceVersion, ...)
// are dropped, and annotations injected by other components --
// "kubectl-client-side-apply" and
// "kubectl.kubernetes.io/last-applied-configuration" (added when an object is
// created with `kubectl apply`) -- as well as the signature annotation are
// removed so they cannot cause verification failures.
func PrepareObjectMeta(in metav1.Object) metav1.ObjectMeta {
	out := metav1.ObjectMeta{
		Name:         in.GetName(),
		GenerateName: in.GetGenerateName(),
		Namespace:    in.GetNamespace(),
	}

	// Labels are copied only when present, preserving a nil map for objects
	// without labels.
	if labels := in.GetLabels(); labels != nil {
		out.Labels = make(map[string]string, len(labels))
		for k, v := range labels {
			out.Labels[k] = v
		}
	}

	// Annotations are always initialized (possibly empty), then stripped of
	// entries added by other components and of the signature itself.
	annotations := in.GetAnnotations()
	out.Annotations = make(map[string]string, len(annotations))
	for k, v := range annotations {
		out.Annotations[k] = v
	}
	delete(out.Annotations, "kubectl-client-side-apply")
	delete(out.Annotations, "kubectl.kubernetes.io/last-applied-configuration")
	delete(out.Annotations, SignatureAnnotation)
	return out
}
// ComputeSha256Checksum computes the sha256 checksum of the tekton object.
// The object is serialized with encoding/json first, so two objects that
// marshal to identical JSON produce the same digest.
func ComputeSha256Checksum(obj interface{}) ([]byte, error) {
	serialized, err := json.Marshal(obj)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal the object: %w", err)
	}
	digest := sha256.Sum256(serialized)
	return digest[:], nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"reflect"
corev1 "k8s.io/api/core/v1"
)
// AffinityAssistantTemplate holds pod specific configuration and is a subset
// of the generic pod Template. All fields are optional; a zero value means
// "no customization".
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
type AffinityAssistantTemplate struct {
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// If specified, the pod's tolerations.
	// +optional
	// +listType=atomic
	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
	// ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified
	// +optional
	// +listType=atomic
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
	// SecurityContext sets the security context for the pod
	// +optional
	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
	// If specified, indicates the pod's priority. "system-node-critical" and
	// "system-cluster-critical" are two special keywords which indicate the
	// highest priorities with the former being the highest priority. Any other
	// name must be defined by creating a PriorityClass object with that name.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName *string `json:"priorityClassName,omitempty"`
}
// Equals checks if this Template is identical to the given Template.
// Two nil templates are considered equal; a nil and a non-nil template are
// not. Non-nil templates are compared field-by-field via reflect.DeepEqual.
func (tpl *AffinityAssistantTemplate) Equals(other *AffinityAssistantTemplate) bool {
	switch {
	case tpl == nil && other == nil:
		return true
	case tpl == nil || other == nil:
		return false
	default:
		return reflect.DeepEqual(tpl, other)
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"reflect"
corev1 "k8s.io/api/core/v1"
)
// Volumes is a list of pod volumes (mirrors Pod.spec.volumes); the named
// type exists so list-type metadata can be attached.
// +listType=atomic
type Volumes []corev1.Volume
// Template holds pod specific configuration. It is also exposed under the
// PodTemplate alias.
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
type Template struct {
	// NodeSelector is a selector which must match a node's labels for the pod
	// to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// List of environment variables that can be provided to the containers belonging to the pod.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// If specified, the pod's tolerations.
	// +optional
	// +listType=atomic
	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
	// If specified, the pod's scheduling constraints.
	// See Pod.spec.affinity (API version: v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Affinity *corev1.Affinity `json:"affinity,omitempty"`
	// SecurityContext holds pod-level security attributes and common container settings.
	// Optional: Defaults to empty. See type description for default values of each field.
	// See Pod.spec.securityContext (API version: v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
	// List of volumes that can be mounted by containers belonging to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes
	// See Pod.spec.volumes (API version: v1)
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge,retainKeys
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Volumes Volumes `json:"volumes,omitempty" patchMergeKey:"name" patchStrategy:"merge,retainKeys" protobuf:"bytes,1,rep,name=volumes"`
	// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io
	// group, which should be used to run this pod. If no RuntimeClass resource
	// matches the named class, the pod will not be run. If unset or empty, the
	// "legacy" RuntimeClass will be used, which is an implicit class with an
	// empty definition that uses the default runtime handler.
	// More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
	// This is a beta feature as of Kubernetes v1.14.
	// +optional
	RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,2,opt,name=runtimeClassName"`
	// AutomountServiceAccountToken indicates whether pods running as this
	// service account should have an API token automatically mounted.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,3,opt,name=automountServiceAccountToken"`
	// Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are
	// 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig
	// will be merged with the policy selected with DNSPolicy.
	// +optional
	DNSPolicy *corev1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,4,opt,name=dnsPolicy,casttype=k8s.io/api/core/v1.DNSPolicy"`
	// Specifies the DNS parameters of a pod.
	// Parameters specified here will be merged to the generated DNS
	// configuration based on DNSPolicy.
	// +optional
	DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,5,opt,name=dnsConfig"`
	// EnableServiceLinks indicates whether information about services should be injected into pod's
	// environment variables, matching the syntax of Docker links.
	// Optional: Defaults to true.
	// +optional
	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,6,opt,name=enableServiceLinks"`
	// If specified, indicates the pod's priority. "system-node-critical" and
	// "system-cluster-critical" are two special keywords which indicate the
	// highest priorities with the former being the highest priority. Any other
	// name must be defined by creating a PriorityClass object with that name.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName *string `json:"priorityClassName,omitempty" protobuf:"bytes,7,opt,name=priorityClassName"`
	// SchedulerName specifies the scheduler to be used to dispatch the Pod
	// +optional
	SchedulerName string `json:"schedulerName,omitempty"`
	// ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified
	// +optional
	// +listType=atomic
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
	// file if specified. This is only valid for non-hostNetwork pods.
	// +optional
	// +listType=atomic
	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
	// HostNetwork specifies whether the pod may use the node network namespace
	// +optional
	HostNetwork bool `json:"hostNetwork,omitempty"`
	// TopologySpreadConstraints controls how Pods are spread across your cluster among
	// failure-domains such as regions, zones, nodes, and other user-defined topology domains.
	// +optional
	// +listType=atomic
	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
}
// Equals reports whether tpl and other describe the same pod template.
// Two nil templates are equal; a nil and a non-nil template are not.
func (tpl *Template) Equals(other *Template) bool {
	switch {
	case tpl == nil && other == nil:
		return true
	case tpl == nil || other == nil:
		return false
	default:
		return reflect.DeepEqual(tpl, other)
	}
}
// ToAffinityAssistantTemplate projects the subset of pod-template fields that
// apply to the affinity assistant pod into an AffinityAssistantTemplate.
// A nil receiver yields a nil result.
func (tpl *Template) ToAffinityAssistantTemplate() *AffinityAssistantTemplate {
	if tpl == nil {
		return nil
	}
	aa := &AffinityAssistantTemplate{}
	aa.NodeSelector = tpl.NodeSelector
	aa.Tolerations = tpl.Tolerations
	aa.ImagePullSecrets = tpl.ImagePullSecrets
	aa.SecurityContext = tpl.SecurityContext
	aa.PriorityClassName = tpl.PriorityClassName
	return aa
}
// PodTemplate holds pod specific configuration; it is an alias of Template.
//
//nolint:revive
type PodTemplate = Template
// MergePodTemplateWithDefault merges 2 PodTemplates together. If the same
// field is set on both templates, the value from tpl wins over the value
// from defaultTpl; unset fields are filled in from defaultTpl. Env and
// Volumes entries are combined by name via mergeByName, with tpl's entries
// taking precedence. When both arguments are non-nil, tpl is modified in
// place and returned.
func MergePodTemplateWithDefault(tpl, defaultTpl *PodTemplate) *PodTemplate {
	if defaultTpl == nil {
		// No configured default, just return the template.
		return tpl
	}
	if tpl == nil {
		// No template, just return the default template.
		return defaultTpl
	}
	// Fill in each field that the template leaves unset.
	if tpl.NodeSelector == nil {
		tpl.NodeSelector = defaultTpl.NodeSelector
	}
	tpl.Env = mergeByName(defaultTpl.Env, tpl.Env)
	if tpl.Tolerations == nil {
		tpl.Tolerations = defaultTpl.Tolerations
	}
	if tpl.Affinity == nil {
		tpl.Affinity = defaultTpl.Affinity
	}
	if tpl.SecurityContext == nil {
		tpl.SecurityContext = defaultTpl.SecurityContext
	}
	tpl.Volumes = mergeByName(defaultTpl.Volumes, tpl.Volumes)
	if tpl.RuntimeClassName == nil {
		tpl.RuntimeClassName = defaultTpl.RuntimeClassName
	}
	if tpl.AutomountServiceAccountToken == nil {
		tpl.AutomountServiceAccountToken = defaultTpl.AutomountServiceAccountToken
	}
	if tpl.DNSPolicy == nil {
		tpl.DNSPolicy = defaultTpl.DNSPolicy
	}
	if tpl.DNSConfig == nil {
		tpl.DNSConfig = defaultTpl.DNSConfig
	}
	if tpl.EnableServiceLinks == nil {
		tpl.EnableServiceLinks = defaultTpl.EnableServiceLinks
	}
	if tpl.PriorityClassName == nil {
		tpl.PriorityClassName = defaultTpl.PriorityClassName
	}
	if tpl.SchedulerName == "" {
		tpl.SchedulerName = defaultTpl.SchedulerName
	}
	if tpl.ImagePullSecrets == nil {
		tpl.ImagePullSecrets = defaultTpl.ImagePullSecrets
	}
	if tpl.HostAliases == nil {
		tpl.HostAliases = defaultTpl.HostAliases
	}
	// HostNetwork is a plain bool, so "unset" is indistinguishable from
	// false; the default can only turn it on, never off.
	if !tpl.HostNetwork && defaultTpl.HostNetwork {
		tpl.HostNetwork = true
	}
	if tpl.TopologySpreadConstraints == nil {
		tpl.TopologySpreadConstraints = defaultTpl.TopologySpreadConstraints
	}
	return tpl
}
// AAPodTemplate holds pod specific configuration for the affinity-assistant;
// it is an alias of AffinityAssistantTemplate.
type AAPodTemplate = AffinityAssistantTemplate
// MergeAAPodTemplateWithDefault is the same as MergePodTemplateWithDefault but
// for AffinityAssistantPodTemplates: unset fields of tpl are filled in from
// defaultTpl. When both arguments are non-nil, tpl is modified in place and
// returned.
func MergeAAPodTemplateWithDefault(tpl, defaultTpl *AAPodTemplate) *AAPodTemplate {
	if defaultTpl == nil {
		// No configured default, just return the template.
		return tpl
	}
	if tpl == nil {
		// No template, just return the default template.
		return defaultTpl
	}
	// Fill in each field that the template leaves unset.
	if tpl.NodeSelector == nil {
		tpl.NodeSelector = defaultTpl.NodeSelector
	}
	if tpl.Tolerations == nil {
		tpl.Tolerations = defaultTpl.Tolerations
	}
	if tpl.ImagePullSecrets == nil {
		tpl.ImagePullSecrets = defaultTpl.ImagePullSecrets
	}
	if tpl.SecurityContext == nil {
		tpl.SecurityContext = defaultTpl.SecurityContext
	}
	if tpl.PriorityClassName == nil {
		tpl.PriorityClassName = defaultTpl.PriorityClassName
	}
	return tpl
}
// mergeByName combines base and overrides into a single slice keyed by each
// item's name (as reported by getName). Override items come first and win on
// name collisions; base items are appended only when their name is not
// already taken. Items whose name is empty are dropped entirely.
func mergeByName[T any](base, overrides []T) []T {
	if len(overrides) == 0 {
		return base
	}
	seen := make(map[string]struct{}, len(overrides))
	result := make([]T, 0, len(base)+len(overrides))
	// Overrides take priority and keep their relative order.
	for _, item := range overrides {
		if name := getName(item); name != "" {
			seen[name] = struct{}{}
			result = append(result, item)
		}
	}
	// Base items fill in only the names not claimed by an override.
	for _, item := range base {
		name := getName(item)
		if name == "" {
			continue
		}
		if _, taken := seen[name]; taken {
			continue
		}
		result = append(result, item)
	}
	return result
}
// getName extracts the Name field from the supported item types
// (corev1.EnvVar and corev1.Volume); any other type yields "".
func getName(item interface{}) string {
	if env, ok := item.(corev1.EnvVar); ok {
		return env.Name
	}
	if vol, ok := item.(corev1.Volume); ok {
		return vol.Name
	}
	return ""
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package pod
import (
v1 "k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — regenerate rather than editing by hand.
func (in *AffinityAssistantTemplate) DeepCopyInto(out *AffinityAssistantTemplate) {
	*out = *in
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Tolerations != nil {
		in, out := &in.Tolerations, &out.Tolerations
		*out = make([]v1.Toleration, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ImagePullSecrets != nil {
		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
		*out = make([]v1.LocalObjectReference, len(*in))
		copy(*out, *in)
	}
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(v1.PodSecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.PriorityClassName != nil {
		in, out := &in.PriorityClassName, &out.PriorityClassName
		*out = new(string)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AffinityAssistantTemplate.
// NOTE(review): generated by deepcopy-gen — regenerate rather than editing by hand.
func (in *AffinityAssistantTemplate) DeepCopy() *AffinityAssistantTemplate {
	if in == nil {
		return nil
	}
	out := new(AffinityAssistantTemplate)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — regenerate rather than editing by hand.
func (in *Template) DeepCopyInto(out *Template) {
	*out = *in
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]v1.EnvVar, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Tolerations != nil {
		in, out := &in.Tolerations, &out.Tolerations
		*out = make([]v1.Toleration, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Affinity != nil {
		in, out := &in.Affinity, &out.Affinity
		*out = new(v1.Affinity)
		(*in).DeepCopyInto(*out)
	}
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(v1.PodSecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make(Volumes, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RuntimeClassName != nil {
		in, out := &in.RuntimeClassName, &out.RuntimeClassName
		*out = new(string)
		**out = **in
	}
	if in.AutomountServiceAccountToken != nil {
		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
		*out = new(bool)
		**out = **in
	}
	if in.DNSPolicy != nil {
		in, out := &in.DNSPolicy, &out.DNSPolicy
		*out = new(v1.DNSPolicy)
		**out = **in
	}
	if in.DNSConfig != nil {
		in, out := &in.DNSConfig, &out.DNSConfig
		*out = new(v1.PodDNSConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.EnableServiceLinks != nil {
		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
		*out = new(bool)
		**out = **in
	}
	if in.PriorityClassName != nil {
		in, out := &in.PriorityClassName, &out.PriorityClassName
		*out = new(string)
		**out = **in
	}
	if in.ImagePullSecrets != nil {
		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
		*out = make([]v1.LocalObjectReference, len(*in))
		copy(*out, *in)
	}
	if in.HostAliases != nil {
		in, out := &in.HostAliases, &out.HostAliases
		*out = make([]v1.HostAlias, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.TopologySpreadConstraints != nil {
		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
		*out = make([]v1.TopologySpreadConstraint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template.
// NOTE(review): generated by deepcopy-gen — regenerate rather than editing by hand.
func (in *Template) DeepCopy() *Template {
	if in == nil {
		return nil
	}
	out := new(Template)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/google/go-cmp/cmp"
)
// Algorithm is the name of a standard cryptographic hash algorithm
// (e.g. SHA256), used as the key type for ArtifactValue digests.
type Algorithm string
// Artifact represents an artifact within a system, potentially containing multiple values
// associated with it.
type Artifact struct {
	// Name is the artifact's identifying category name.
	Name string `json:"name,omitempty"`
	// Values is a collection of values related to the artifact.
	Values []ArtifactValue `json:"values,omitempty"`
	// BuildOutput indicates whether the artifact is a build output or a by-product.
	BuildOutput bool `json:"buildOutput,omitempty"`
}
// ArtifactValue represents a specific value or data element within an Artifact.
type ArtifactValue struct {
	// Digest holds algorithm-specific digests for verifying the content (e.g., SHA256).
	Digest map[Algorithm]string `json:"digest,omitempty"`
	// Uri is the location where the artifact value can be retrieved.
	// NOTE(review): Go convention would spell this URI, but renaming the
	// exported field would break the API surface.
	Uri string `json:"uri,omitempty"`
}
// TaskRunStepArtifact represents an artifact produced or used by a step within a task run.
// It is an alias of (and directly uses) the Artifact type for its structure.
type TaskRunStepArtifact = Artifact
// Artifacts represents the collection of input and output artifacts associated with
// a task run or a similar process. Artifacts in this context are units of data or resources
// that the process either consumes as input or produces as output.
type Artifacts struct {
	// Inputs are the artifacts consumed by the process.
	// +listType=atomic
	Inputs []Artifact `json:"inputs,omitempty"`
	// Outputs are the artifacts produced by the process.
	// +listType=atomic
	Outputs []Artifact `json:"outputs,omitempty"`
}
// Merge combines the artifacts of another into a. Artifacts are unioned by
// name: values already present (per cmp.Equal) are not added again. For
// inputs, the BuildOutput flag is not tracked (it is always false in the
// result, as before). For outputs, an artifact's BuildOutput flag is set to
// true if either side marks it as a build output; it is never reset to false.
// A nil another leaves only a's own artifacts, still normalized.
//
// Unlike the previous map-iteration implementation, the result order is
// deterministic: a's artifacts first (in order), then another's new names in
// order of appearance.
func (a *Artifacts) Merge(another *Artifacts) {
	var moreInputs, moreOutputs []Artifact
	if another != nil {
		moreInputs = another.Inputs
		moreOutputs = another.Outputs
	}
	a.Inputs = mergeArtifactLists(a.Inputs, moreInputs, false)
	a.Outputs = mergeArtifactLists(a.Outputs, moreOutputs, true)
}

// mergeArtifactLists unions additions into base by artifact name, preserving
// first-seen order. When keepBuildOutput is false (inputs), the BuildOutput
// flag is dropped from the result; when true (outputs), it is carried over
// from base and may only be upgraded from false to true by an addition.
func mergeArtifactLists(base, additions []Artifact, keepBuildOutput bool) []Artifact {
	pos := make(map[string]int, len(base)) // artifact name -> index in merged
	var merged []Artifact
	for _, art := range base {
		entry := Artifact{Name: art.Name, Values: art.Values}
		if keepBuildOutput {
			entry.BuildOutput = art.BuildOutput
		}
		if i, ok := pos[art.Name]; ok {
			// Duplicate name within base: the last occurrence wins,
			// matching the previous map-based behavior.
			merged[i] = entry
		} else {
			pos[art.Name] = len(merged)
			merged = append(merged, entry)
		}
	}
	for _, art := range additions {
		i, ok := pos[art.Name]
		if !ok {
			i = len(merged)
			pos[art.Name] = i
			merged = append(merged, Artifact{Name: art.Name, Values: []ArtifactValue{}})
		}
		// Only ever upgrade BuildOutput to true; never reset an existing true.
		if keepBuildOutput && art.BuildOutput {
			merged[i].BuildOutput = true
		}
		for _, v := range art.Values {
			if !containsArtifactValue(merged[i].Values, v) {
				merged[i].Values = append(merged[i].Values, v)
			}
		}
	}
	return merged
}

// containsArtifactValue reports whether v is already present in values,
// using cmp.Equal for deep comparison.
func containsArtifactValue(values []ArtifactValue, v ArtifactValue) bool {
	for _, existing := range values {
		if cmp.Equal(v, existing) {
			return true
		}
	}
	return false
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Step runs a subcomponent of a Task.
type Step struct {
	// Name of the Step specified as a DNS_LABEL.
	// Each Step in a Task must have a unique name.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// DisplayName is a user-facing name of the step that may be
	// used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`
	// Docker image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Step's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of sources to populate environment variables in the Step.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the Step is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	// +listType=atomic
	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the Step.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// ComputeResources required by this Step.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	ComputeResources corev1.ResourceRequirements `json:"computeResources,omitempty" protobuf:"bytes,8,opt,name=computeResources"`
	// Volumes to mount into the Step's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	// +listType=atomic
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the Step.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	// +listType=atomic
	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchMergeKey:"devicePath" patchStrategy:"merge" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// SecurityContext defines the security options the Step should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Script is the contents of an executable file to execute.
	//
	// If Script is not empty, the Step cannot have a Command and the Args will be passed to the Script.
	// +optional
	Script string `json:"script,omitempty"`
	// Timeout is the time after which the step times out. Defaults to never.
	// Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
	// This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha"
	// for this field to be supported.
	//
	// Workspaces is a list of workspaces from the Task that this Step wants
	// exclusive access to. Adding a workspace to this list means that any
	// other Step or Sidecar that does not also request this Workspace will
	// not have access to it.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceUsage `json:"workspaces,omitempty"`
	// OnError defines the exiting behavior of a container on error;
	// can be set to [ continue | stopAndFail ].
	OnError OnErrorType `json:"onError,omitempty"`
	// Stores configuration for the stdout stream of the step.
	// +optional
	StdoutConfig *StepOutputConfig `json:"stdoutConfig,omitempty"`
	// Stores configuration for the stderr stream of the step.
	// +optional
	StderrConfig *StepOutputConfig `json:"stderrConfig,omitempty"`
	// Contains the reference to an existing StepAction.
	//+optional
	Ref *Ref `json:"ref,omitempty"`
	// Params declares parameters passed to this step action.
	// +optional
	Params Params `json:"params,omitempty"`
	// Results declares StepResults produced by the Step.
	//
	// It can be used in an inlined Step when used to store Results to $(step.results.resultName.path).
	// It cannot be used when referencing StepActions using [v1.Step.Ref].
	// The Results declared by the StepActions will be stored here instead.
	// +optional
	// +listType=atomic
	Results []StepResult `json:"results,omitempty"`
	// When is a list of when expressions that need to be true for the task to run
	// +optional
	When StepWhenExpressions `json:"when,omitempty"`
}
// Ref can be used to refer to a specific instance of a StepAction.
type Ref struct {
	// Name of the referenced StepAction.
	Name string `json:"name,omitempty"`
	// ResolverRef allows referencing a StepAction in a remote location
	// like a git repo.
	// +optional
	ResolverRef `json:",omitempty"`
}
// OnErrorType defines the supported exit behaviors of a container on error.
type OnErrorType string

const (
	// StopAndFail indicates exiting the taskRun if the container exits with a non-zero exit code
	StopAndFail OnErrorType = "stopAndFail"
	// Continue indicates continuing to execute the rest of the steps irrespective of the container exit code
	Continue OnErrorType = "continue"
)
// StepOutputConfig stores configuration for a step output stream
// (used for both the stdout and stderr streams of a Step).
type StepOutputConfig struct {
	// Path to duplicate the output stream to on the container's local filesystem.
	// +optional
	Path string `json:"path,omitempty"`
}
// ToK8sContainer converts the Step to a Kubernetes Container struct,
// copying across only the fields the two types share.
func (s *Step) ToK8sContainer() *corev1.Container {
	var c corev1.Container
	c.Name = s.Name
	c.Image = s.Image
	c.Command = s.Command
	c.Args = s.Args
	c.WorkingDir = s.WorkingDir
	c.EnvFrom = s.EnvFrom
	c.Env = s.Env
	c.Resources = s.ComputeResources
	c.VolumeMounts = s.VolumeMounts
	c.VolumeDevices = s.VolumeDevices
	c.ImagePullPolicy = s.ImagePullPolicy
	c.SecurityContext = s.SecurityContext
	return &c
}
// SetContainerFields copies the shared fields of the given Container onto
// the Step, overwriting the Step's current values for those fields.
func (s *Step) SetContainerFields(c corev1.Container) {
	// Identity and image.
	s.Name = c.Name
	s.Image = c.Image
	s.ImagePullPolicy = c.ImagePullPolicy
	// Invocation.
	s.Command = c.Command
	s.Args = c.Args
	s.WorkingDir = c.WorkingDir
	// Environment.
	s.EnvFrom = c.EnvFrom
	s.Env = c.Env
	// Resources and storage.
	s.ComputeResources = c.Resources
	s.VolumeMounts = c.VolumeMounts
	s.VolumeDevices = c.VolumeDevices
	// Security.
	s.SecurityContext = c.SecurityContext
}
// GetVarSubstitutionExpressions walks all the places a substitution reference
// can be used within the Step and returns every expression found, in a fixed
// order: name, image, pull policy, script, working dir, command, args, then
// each env var's value and any secret/configmap key references.
func (s *Step) GetVarSubstitutionExpressions() []string {
	var exprs []string
	collect := func(values ...string) {
		for _, v := range values {
			exprs = append(exprs, validateString(v)...)
		}
	}
	collect(s.Name, s.Image, string(s.ImagePullPolicy), s.Script, s.WorkingDir)
	collect(s.Command...)
	collect(s.Args...)
	for _, env := range s.Env {
		collect(env.Value)
		if vf := env.ValueFrom; vf != nil {
			if vf.SecretKeyRef != nil {
				collect(vf.SecretKeyRef.Key, vf.SecretKeyRef.LocalObjectReference.Name)
			}
			if vf.ConfigMapKeyRef != nil {
				collect(vf.ConfigMapKeyRef.Key, vf.ConfigMapKeyRef.LocalObjectReference.Name)
			}
		}
	}
	return exprs
}
// StepTemplate is a template for a Step. It mirrors the container-related
// fields of Step (image, command, args, env, resources, volumes, pull policy
// and security context).
type StepTemplate struct {
	// Image reference name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Step's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of sources to populate environment variables in the Step.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the Step is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	// +listType=atomic
	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the Step.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// ComputeResources required by this Step.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	ComputeResources corev1.ResourceRequirements `json:"computeResources,omitempty" protobuf:"bytes,8,opt,name=computeResources"`
	// Volumes to mount into the Step's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	// +listType=atomic
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the Step.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	// +listType=atomic
	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchMergeKey:"devicePath" patchStrategy:"merge" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// SecurityContext defines the security options the Step should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
}
// SetContainerFields sets the fields of the StepTemplate to the values of the
// corresponding fields in the given Container.
func (s *StepTemplate) SetContainerFields(c corev1.Container) {
	s.Image = c.Image
	s.Command = c.Command
	s.Args = c.Args
	s.WorkingDir = c.WorkingDir
	s.EnvFrom = c.EnvFrom
	s.Env = c.Env
	s.ComputeResources = c.Resources
	s.VolumeMounts = c.VolumeMounts
	s.VolumeDevices = c.VolumeDevices
	s.ImagePullPolicy = c.ImagePullPolicy
	s.SecurityContext = c.SecurityContext
}
// ToK8sContainer converts the StepTemplate to a Kubernetes Container struct.
// Every populated field maps 1:1 onto the corresponding corev1.Container
// field, with ComputeResources feeding the upstream Resources field.
func (s *StepTemplate) ToK8sContainer() *corev1.Container {
	var container corev1.Container
	container.Image = s.Image
	container.Command = s.Command
	container.Args = s.Args
	container.WorkingDir = s.WorkingDir
	container.EnvFrom = s.EnvFrom
	container.Env = s.Env
	container.Resources = s.ComputeResources
	container.VolumeMounts = s.VolumeMounts
	container.VolumeDevices = s.VolumeDevices
	container.ImagePullPolicy = s.ImagePullPolicy
	container.SecurityContext = s.SecurityContext
	return &container
}
// Sidecar has nearly the same data structure as Step but does not have the ability to timeout.
//
// The field set (and protobuf tags) mirrors corev1.Container, with the
// upstream Resources field renamed to ComputeResources; see SetContainerFields
// and ToK8sContainer for the exact mapping.
type Sidecar struct {
// Name of the Sidecar specified as a DNS_LABEL.
// Each Sidecar in a Task must have a unique name (DNS_LABEL).
// Cannot be updated.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Image reference name.
// More info: https://kubernetes.io/docs/concepts/containers/images
// +optional
Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
// Entrypoint array. Not executed within a shell.
// The image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
// of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
// +listType=atomic
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
// Arguments to the entrypoint.
// The image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
// of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
// +listType=atomic
Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
// Sidecar's working directory.
// If not specified, the container runtime's default will be used, which
// might be configured in the container image.
// Cannot be updated.
// +optional
WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
// List of ports to expose from the Sidecar. Exposing a port here gives
// the system additional information about the network connections a
// container uses, but is primarily informational. Not specifying a port here
// DOES NOT prevent that port from being exposed. Any port which is
// listening on the default "0.0.0.0" address inside a container will be
// accessible from the network.
// Cannot be updated.
// +optional
// +patchMergeKey=containerPort
// +patchStrategy=merge
// +listType=map
// +listMapKey=containerPort
// +listMapKey=protocol
Ports []corev1.ContainerPort `json:"ports,omitempty" patchMergeKey:"containerPort" patchStrategy:"merge" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the Sidecar.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
// +optional
// +listType=atomic
EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
// List of environment variables to set in the Sidecar.
// Cannot be updated.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=atomic
Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
// ComputeResources required by this Sidecar.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
// +optional
ComputeResources corev1.ResourceRequirements `json:"computeResources,omitempty" protobuf:"bytes,8,opt,name=computeResources"`
// Volumes to mount into the Sidecar's filesystem.
// Cannot be updated.
// +optional
// +patchMergeKey=mountPath
// +patchStrategy=merge
// +listType=atomic
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
// volumeDevices is the list of block devices to be used by the Sidecar.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
// +listType=atomic
VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchMergeKey:"devicePath" patchStrategy:"merge" protobuf:"bytes,21,rep,name=volumeDevices"`
// Periodic probe of Sidecar liveness.
// Container will be restarted if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
// Periodic probe of Sidecar service readiness.
// Container will be removed from service endpoints if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
// StartupProbe indicates that the Pod the Sidecar is running in has successfully initialized.
// If specified, no other probes are executed until this completes successfully.
// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
// when it might take a long time to load data or warm a cache, than during steady-state operation.
// This cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
// Actions that the management system should take in response to Sidecar lifecycle events.
// Cannot be updated.
// +optional
Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
// Optional: Path at which the file to which the Sidecar's termination message
// will be written is mounted into the Sidecar's filesystem.
// Message written is intended to be brief final status, such as an assertion failure message.
// Will be truncated by the node if greater than 4096 bytes. The total message length across
// all containers will be limited to 12kb.
// Defaults to /dev/termination-log.
// Cannot be updated.
// +optional
TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
// Indicate how the termination message should be populated. File will use the contents of
// terminationMessagePath to populate the Sidecar status message on both success and failure.
// FallbackToLogsOnError will use the last chunk of Sidecar log output if the termination
// message file is empty and the Sidecar exited with an error.
// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
// Defaults to File.
// Cannot be updated.
// +optional
TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
// Image pull policy.
// One of Always, Never, IfNotPresent.
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
// +optional
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
// SecurityContext defines the security options the Sidecar should be run with.
// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
// and shouldn't be used for general purpose containers.
// Whether this Sidecar should allocate a buffer for stdin in the container runtime. If this
// is not set, reads from stdin in the Sidecar will always result in EOF.
// Default is false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
// Whether the container runtime should close the stdin channel after it has been opened by
// a single attach. When stdin is true the stdin stream will remain open across multiple attach
// sessions. If stdinOnce is set to true, stdin is opened on Sidecar start, is empty until the
// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
// at which time stdin is closed and remains closed until the Sidecar is restarted. If this
// flag is false, a container processes that reads from stdin will never receive an EOF.
// Default is false
// +optional
StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
// Whether this Sidecar should allocate a TTY for itself, also requires 'stdin' to be true.
// Default is false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
// Script is the contents of an executable file to execute.
//
// If Script is not empty, the Sidecar cannot have a Command or Args.
// +optional
Script string `json:"script,omitempty"`
// This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha"
// for this field to be supported.
//
// Workspaces is a list of workspaces from the Task that this Sidecar wants
// exclusive access to. Adding a workspace to this list means that any
// other Step or Sidecar that does not also request this Workspace will
// not have access to it.
// +optional
// +listType=atomic
Workspaces []WorkspaceUsage `json:"workspaces,omitempty"`
// RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an
// initContainer and must have its policy set to "Always". It is currently
// left optional to help support Kubernetes versions prior to 1.29 when this feature
// was introduced.
// +optional
RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
}
// ToK8sContainer converts the Sidecar to a Kubernetes Container struct.
//
// The previous implementation duplicated the entire struct literal behind a
// `s.RestartPolicy == nil` check. Assigning RestartPolicy unconditionally is
// equivalent: when s.RestartPolicy is nil the field ends up nil either way,
// because nil is the zero value of *corev1.ContainerRestartPolicy.
func (s *Sidecar) ToK8sContainer() *corev1.Container {
	return &corev1.Container{
		Name:                     s.Name,
		Image:                    s.Image,
		Command:                  s.Command,
		Args:                     s.Args,
		WorkingDir:               s.WorkingDir,
		Ports:                    s.Ports,
		EnvFrom:                  s.EnvFrom,
		Env:                      s.Env,
		Resources:                s.ComputeResources,
		VolumeMounts:             s.VolumeMounts,
		VolumeDevices:            s.VolumeDevices,
		LivenessProbe:            s.LivenessProbe,
		ReadinessProbe:           s.ReadinessProbe,
		RestartPolicy:            s.RestartPolicy,
		StartupProbe:             s.StartupProbe,
		Lifecycle:                s.Lifecycle,
		TerminationMessagePath:   s.TerminationMessagePath,
		TerminationMessagePolicy: s.TerminationMessagePolicy,
		ImagePullPolicy:          s.ImagePullPolicy,
		SecurityContext:          s.SecurityContext,
		Stdin:                    s.Stdin,
		StdinOnce:                s.StdinOnce,
		TTY:                      s.TTY,
	}
}
// SetContainerFields sets the fields of the Sidecar to the values of the
// corresponding fields in the Container. It is the inverse of ToK8sContainer;
// note that c.Resources maps onto the renamed ComputeResources field.
func (s *Sidecar) SetContainerFields(c corev1.Container) {
s.Name = c.Name
s.Image = c.Image
s.Command = c.Command
s.Args = c.Args
s.WorkingDir = c.WorkingDir
s.Ports = c.Ports
s.EnvFrom = c.EnvFrom
s.Env = c.Env
// Tekton renames Resources to ComputeResources.
s.ComputeResources = c.Resources
s.VolumeMounts = c.VolumeMounts
s.VolumeDevices = c.VolumeDevices
s.LivenessProbe = c.LivenessProbe
s.ReadinessProbe = c.ReadinessProbe
s.StartupProbe = c.StartupProbe
s.Lifecycle = c.Lifecycle
s.TerminationMessagePath = c.TerminationMessagePath
s.TerminationMessagePolicy = c.TerminationMessagePolicy
s.ImagePullPolicy = c.ImagePullPolicy
s.SecurityContext = c.SecurityContext
s.Stdin = c.Stdin
s.StdinOnce = c.StdinOnce
s.TTY = c.TTY
s.RestartPolicy = c.RestartPolicy
}
// GetVarSubstitutionExpressions walks all the places a substitution reference
// can be used in a Sidecar and collects every expression found, in a fixed
// order: scalar fields first, then command, args, and env entries.
func (s *Sidecar) GetVarSubstitutionExpressions() []string {
	var expressions []string
	for _, field := range []string{s.Name, s.Image, string(s.ImagePullPolicy), s.Script, s.WorkingDir} {
		expressions = append(expressions, validateString(field)...)
	}
	for _, list := range [][]string{s.Command, s.Args} {
		for _, item := range list {
			expressions = append(expressions, validateString(item)...)
		}
	}
	for _, envVar := range s.Env {
		expressions = append(expressions, validateString(envVar.Value)...)
		if source := envVar.ValueFrom; source != nil {
			if ref := source.SecretKeyRef; ref != nil {
				expressions = append(expressions, validateString(ref.Key)...)
				expressions = append(expressions, validateString(ref.LocalObjectReference.Name)...)
			}
			if ref := source.ConfigMapKeyRef; ref != nil {
				expressions = append(expressions, validateString(ref.Key)...)
				expressions = append(expressions, validateString(ref.LocalObjectReference.Name)...)
			}
		}
	}
	return expressions
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"errors"
"fmt"
"regexp"
"slices"
"strings"
"time"
"github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
// Validate ensures that a supplied Ref field is populated
// correctly. No errors are returned for a nil Ref.
func (ref *Ref) Validate(ctx context.Context) (errs *apis.FieldError) {
	if ref != nil {
		errs = validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
	}
	return errs
}
// validateRef validates a reference given by name/resolver/params. A ref may
// be specified either via a remote resolver (resolver and/or params set) or
// via a plain name; the two forms are mutually exclusive with respect to
// params, and URL-like names additionally require the concise resolver
// syntax feature flag.
func validateRef(ctx context.Context, refName string, refResolver ResolverName, refParams Params) (errs *apis.FieldError) {
switch {
// Remote resolution form: resolver and/or resolver params supplied.
case refResolver != "" || refParams != nil:
if refParams != nil {
// Resolver params are a beta API field.
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
// A plain name and resolver params cannot be combined.
if refName != "" {
errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
}
// Params without a resolver make no sense.
if refResolver == "" {
errs = errs.Also(apis.ErrMissingField("resolver"))
}
errs = errs.Also(ValidateParameters(ctx, refParams))
}
if refResolver != "" {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
if refName != "" {
// make sure that the name is url-like.
err := RefNameLikeUrl(refName)
if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
// If name is url-like then concise resolver syntax must be enabled
errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
}
if err != nil {
errs = errs.Also(apis.ErrInvalidValue(err, "name"))
}
}
}
// Name-only form.
case refName != "":
// ref name can be a Url-like format.
if err := RefNameLikeUrl(refName); err == nil {
// If name is url-like then concise resolver syntax must be enabled
if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
}
// In stage1 of concise remote resolvers syntax, this is a required field.
// TODO: remove this check when implementing stage 2 where this is optional.
if refResolver == "" {
errs = errs.Also(apis.ErrMissingField("resolver"))
}
// Or, it must be a valid k8s name
} else {
// ref name must be a valid k8s name
if errSlice := validation.IsQualifiedName(refName); len(errSlice) != 0 {
errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
}
}
// Neither resolver, params, nor name supplied: the ref is empty.
default:
errs = errs.Also(apis.ErrMissingField("name"))
}
return errs
}
// refNameSchemeRegex matches a URI-like scheme prefix (e.g. "https://").
// Compiled once at package init instead of on every call; compiling a
// regexp in a hot validation path was needless repeated work.
var refNameSchemeRegex = regexp.MustCompile(`[\w-]+:\/\/*`)

// RefNameLikeUrl checks if the name is url parsable and returns an error if it isn't.
func RefNameLikeUrl(name string) error {
	if !refNameSchemeRegex.MatchString(name) {
		return errors.New("invalid URI for request")
	}
	return nil
}
// Validate implements apis.Validatable.
// It checks the artifacts feature gate first, then enforces mutual exclusion
// between Ref and inline container fields, then validates name, timeout,
// volume mounts, onError, script, stdout/stderr configs, and finally step
// result/artifact reference placement.
func (s *Step) Validate(ctx context.Context) (errs *apis.FieldError) {
if err := validateArtifactsReferencesInStep(ctx, s); err != nil {
return err
}
// When a remote Step is used via Ref, the fields that the referenced
// Step will supply must not also be set inline.
if s.Ref != nil {
errs = errs.Also(s.Ref.Validate(ctx))
if s.Image != "" {
errs = errs.Also(&apis.FieldError{
Message: "image cannot be used with Ref",
Paths: []string{"image"},
})
}
if len(s.Command) > 0 {
errs = errs.Also(&apis.FieldError{
Message: "command cannot be used with Ref",
Paths: []string{"command"},
})
}
if len(s.Args) > 0 {
errs = errs.Also(&apis.FieldError{
Message: "args cannot be used with Ref",
Paths: []string{"args"},
})
}
if s.Script != "" {
errs = errs.Also(&apis.FieldError{
Message: "script cannot be used with Ref",
Paths: []string{"script"},
})
}
if s.WorkingDir != "" {
errs = errs.Also(&apis.FieldError{
Message: "working dir cannot be used with Ref",
Paths: []string{"workingDir"},
})
}
if s.Env != nil {
errs = errs.Also(&apis.FieldError{
Message: "env cannot be used with Ref",
Paths: []string{"env"},
})
}
if len(s.VolumeMounts) > 0 {
errs = errs.Also(&apis.FieldError{
Message: "volumeMounts cannot be used with Ref",
Paths: []string{"volumeMounts"},
})
}
if len(s.Results) > 0 {
errs = errs.Also(&apis.FieldError{
Message: "results cannot be used with Ref",
Paths: []string{"results"},
})
}
// Inline Step: params are only meaningful with Ref, and an image is required.
} else {
if len(s.Params) > 0 {
errs = errs.Also(&apis.FieldError{
Message: "params cannot be used without Ref",
Paths: []string{"params"},
})
}
if s.Image == "" {
errs = errs.Also(apis.ErrMissingField("Image"))
}
if s.Script != "" {
if len(s.Command) > 0 {
errs = errs.Also(&apis.FieldError{
Message: "script cannot be used with command",
Paths: []string{"script"},
})
}
}
}
// Step names become container names, so they must be DNS labels.
if s.Name != "" {
if e := validation.IsDNS1123Label(s.Name); len(e) > 0 {
errs = errs.Also(&apis.FieldError{
Message: fmt.Sprintf("invalid value %q", s.Name),
Paths: []string{"name"},
Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
})
}
}
if s.Timeout != nil {
if s.Timeout.Duration < time.Duration(0) {
return apis.ErrInvalidValue(s.Timeout.Duration, "negative timeout")
}
}
// /tekton/ (except /tekton/home) and the tekton-internal- name prefix are
// reserved for Tekton's own mounts.
for j, vm := range s.VolumeMounts {
if strings.HasPrefix(vm.MountPath, "/tekton/") &&
!strings.HasPrefix(vm.MountPath, "/tekton/home") {
errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", vm.Name, vm.MountPath), "mountPath").ViaFieldIndex("volumeMounts", j))
}
if strings.HasPrefix(vm.Name, "tekton-internal-") {
errs = errs.Also(apis.ErrGeneric(fmt.Sprintf(`volumeMount name %q cannot start with "tekton-internal-"`, vm.Name), "name").ViaFieldIndex("volumeMounts", j))
}
}
// onError may be a param reference (resolved later) or one of the two
// literal values.
if s.OnError != "" {
if !isParamRefs(string(s.OnError)) && s.OnError != Continue && s.OnError != StopAndFail {
errs = errs.Also(&apis.FieldError{
Message: fmt.Sprintf("invalid value: \"%v\"", s.OnError),
Paths: []string{"onError"},
Details: "Task step onError must be either \"continue\" or \"stopAndFail\"",
})
}
}
if s.Script != "" {
cleaned := strings.TrimSpace(s.Script)
if strings.HasPrefix(cleaned, "#!win") {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script"))
}
}
// StdoutConfig is an alpha feature and will fail validation if it's used in a task spec
// when the enable-api-fields feature gate is not "alpha".
if s.StdoutConfig != nil {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "step stdout stream support", config.AlphaAPIFields).ViaField("stdoutconfig"))
}
// StderrConfig is an alpha feature and will fail validation if it's used in a task spec
// when the enable-api-fields feature gate is not "alpha".
if s.StderrConfig != nil {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "step stderr stream support", config.AlphaAPIFields).ViaField("stderrconfig"))
}
// Validate usage of step result reference.
// Referencing previous step's results are only allowed in `env`, `command` and `args`.
errs = errs.Also(validateStepResultReference(s))
// Validate usage of step artifacts output reference
// Referencing previous step's results are only allowed in `env`, `command` and `args`, `script`.
errs = errs.Also(validateStepArtifactsReference(s))
return errs
}
// isParamRefs attempts to check if a specified string looks like it contains any parameter reference
// This is useful to make sure the specified value looks like a Parameter Reference before performing any strict validation
func isParamRefs(s string) bool {
	paramRefPrefix := "$(" + ParamsPrefix
	return strings.HasPrefix(s, paramRefPrefix)
}
// validateArtifactsReferencesInStep rejects artifact references in a Step's
// script, command, args, and env values when the enable-artifacts feature
// flag is off.
func validateArtifactsReferencesInStep(ctx context.Context, s *Step) *apis.FieldError {
	cfg := config.FromContextOrDefaults(ctx)
	if cfg == nil || cfg.FeatureFlags == nil {
		cfg = &config.Config{FeatureFlags: &config.FeatureFlags{}}
	}
	if cfg.FeatureFlags.EnableArtifacts {
		// Feature enabled: nothing to restrict.
		return nil
	}
	// Collect every string field where an artifact reference could appear.
	var candidates []string
	if s.Script != "" {
		candidates = append(candidates, s.Script)
	}
	candidates = append(candidates, s.Command...)
	candidates = append(candidates, s.Args...)
	for _, envVar := range s.Env {
		if envVar.Value != "" {
			candidates = append(candidates, envVar.Value)
		}
	}
	if slices.ContainsFunc(candidates, stepArtifactReferenceExists) || slices.ContainsFunc(candidates, taskArtifactReferenceExists) {
		return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "")
	}
	return nil
}
// stepArtifactReferenceExists reports whether src contains a step artifact
// reference, either in regex form or as the literal path expression.
func stepArtifactReferenceExists(src string) bool {
	if artifactref.StepArtifactRegex.MatchString(src) {
		return true
	}
	return strings.Contains(src, "$("+artifactref.StepArtifactPathPattern+")")
}
// taskArtifactReferenceExists reports whether src contains a task artifact
// reference, either in regex form or as the literal path expression.
func taskArtifactReferenceExists(src string) bool {
	if artifactref.TaskArtifactRegex.MatchString(src) {
		return true
	}
	return strings.Contains(src, "$("+artifactref.TaskArtifactPathPattern+")")
}
// validateStepResultReference rejects step result references in any Step
// field other than env, command, and args.
func validateStepResultReference(s *Step) (errs *apis.FieldError) {
	// Scalar fields checked in a fixed order.
	scalarFields := []struct {
		value string
		path  string
	}{
		{s.Name, "name"},
		{s.Image, "image"},
		{s.Script, "script"},
		{string(s.ImagePullPolicy), "imagePullPolicy"},
		{s.WorkingDir, "workingDir"},
	}
	for _, field := range scalarFields {
		errs = errs.Also(errorIfStepResultReferencedInField(field.value, field.path))
	}
	for _, src := range s.EnvFrom {
		errs = errs.Also(errorIfStepResultReferencedInField(src.Prefix, "envFrom.prefix"))
		if src.ConfigMapRef != nil {
			errs = errs.Also(errorIfStepResultReferencedInField(src.ConfigMapRef.LocalObjectReference.Name, "envFrom.configMapRef"))
		}
		if src.SecretRef != nil {
			errs = errs.Also(errorIfStepResultReferencedInField(src.SecretRef.LocalObjectReference.Name, "envFrom.secretRef"))
		}
	}
	for _, mount := range s.VolumeMounts {
		errs = errs.Also(errorIfStepResultReferencedInField(mount.Name, "volumeMounts.name"))
		errs = errs.Also(errorIfStepResultReferencedInField(mount.MountPath, "volumeMounts.mountPath"))
		errs = errs.Also(errorIfStepResultReferencedInField(mount.SubPath, "volumeMounts.subPath"))
	}
	for _, device := range s.VolumeDevices {
		errs = errs.Also(errorIfStepResultReferencedInField(device.Name, "volumeDevices.name"))
		errs = errs.Also(errorIfStepResultReferencedInField(device.DevicePath, "volumeDevices.devicePath"))
	}
	return errs
}
// errorIfStepResultReferencedInField returns a FieldError for fieldName when
// value contains a step result reference; nil otherwise.
func errorIfStepResultReferencedInField(value, fieldName string) (errs *apis.FieldError) {
	if !resultref.StepResultRegex.MatchString(value) {
		return nil
	}
	return errs.Also(&apis.FieldError{
		Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
		Paths:   []string{fieldName},
	})
}
// validateStepArtifactsReference rejects step artifact references in any Step
// field other than env, command, args, and script.
func validateStepArtifactsReference(s *Step) (errs *apis.FieldError) {
	// Scalar fields checked in a fixed order (script is allowed here,
	// unlike for step results).
	scalarFields := []struct {
		value string
		path  string
	}{
		{s.Name, "name"},
		{s.Image, "image"},
		{string(s.ImagePullPolicy), "imagePullPolicy"},
		{s.WorkingDir, "workingDir"},
	}
	for _, field := range scalarFields {
		errs = errs.Also(errorIfStepArtifactReferencedInField(field.value, field.path))
	}
	for _, src := range s.EnvFrom {
		errs = errs.Also(errorIfStepArtifactReferencedInField(src.Prefix, "envFrom.prefix"))
		if src.ConfigMapRef != nil {
			errs = errs.Also(errorIfStepArtifactReferencedInField(src.ConfigMapRef.LocalObjectReference.Name, "envFrom.configMapRef"))
		}
		if src.SecretRef != nil {
			errs = errs.Also(errorIfStepArtifactReferencedInField(src.SecretRef.LocalObjectReference.Name, "envFrom.secretRef"))
		}
	}
	for _, mount := range s.VolumeMounts {
		errs = errs.Also(errorIfStepArtifactReferencedInField(mount.Name, "volumeMounts.name"))
		errs = errs.Also(errorIfStepArtifactReferencedInField(mount.MountPath, "volumeMounts.mountPath"))
		errs = errs.Also(errorIfStepArtifactReferencedInField(mount.SubPath, "volumeMounts.subPath"))
	}
	for _, device := range s.VolumeDevices {
		errs = errs.Also(errorIfStepArtifactReferencedInField(device.Name, "volumeDevices.name"))
		errs = errs.Also(errorIfStepArtifactReferencedInField(device.DevicePath, "volumeDevices.devicePath"))
	}
	return errs
}
// errorIfStepArtifactReferencedInField returns a FieldError for fieldName
// when value contains a step artifact reference; nil otherwise.
func errorIfStepArtifactReferencedInField(value, fieldName string) (errs *apis.FieldError) {
	if !stepArtifactReferenceExists(value) {
		return nil
	}
	return errs.Also(&apis.FieldError{
		Message: "stepArtifact substitutions are only allowed in env, command, args and script. Found usage in",
		Paths:   []string{fieldName},
	})
}
// Validate checks a Sidecar: the name must not collide with Tekton's reserved
// results sidecar, an image is required, and script/command are mutually
// exclusive.
func (sc *Sidecar) Validate(ctx context.Context) (errs *apis.FieldError) {
	if sc.Name == pipeline.ReservedResultsSidecarName {
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("Invalid: cannot use reserved sidecar name %v ", sc.Name),
			Paths:   []string{"name"},
		})
	}
	if sc.Image == "" {
		errs = errs.Also(apis.ErrMissingField("image"))
	}
	if sc.Script != "" && len(sc.Command) > 0 {
		errs = errs.Also(&apis.FieldError{
			Message: "script cannot be used with command",
			Paths:   []string{"script"},
		})
	}
	return errs
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"maps"
"sort"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
)
// Matrix is used to fan out Tasks in a Pipeline.
// Params and Include may be used together; see FanOut for how explicit
// Include combinations are merged with the Params cross-product.
type Matrix struct {
// Params is a list of parameters used to fan out the pipelineTask
// Params takes only `Parameters` of type `"array"`
// Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`.
// The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.
Params Params `json:"params,omitempty"`
// Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.
// +optional
Include IncludeParamsList `json:"include,omitempty"`
}
// IncludeParamsList is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.
// +listType=atomic
type IncludeParamsList []IncludeParams
// IncludeParams allows passing in a specific combination of Parameters into the Matrix.
type IncludeParams struct {
// Name of the specified combination
Name string `json:"name,omitempty"`
// Params takes only `Parameters` of type `"string"`
// The names of the `params` must match the names of the `params` in the underlying `Task`
Params Params `json:"params,omitempty"`
}
// Combination is a map, mainly defined to hold a single combination from a Matrix with key as param.Name and value as param.Value
type Combination map[string]string
// Combinations is a Combination list
type Combinations []Combination
// FanOut returns a list of params that represent combinations.
func (m *Matrix) FanOut() []Params {
	includeCombinations := m.getIncludeCombinations()
	if m.HasInclude() && !m.HasParams() {
		// Only Matrix Include Parameters were supplied: the explicit
		// combinations are the whole fan-out.
		return includeCombinations.toParams()
	}
	// Expand the matrix params into the cross-product of their values.
	var combinations Combinations
	for _, param := range m.Params {
		combinations = combinations.fanOutMatrixParams(param)
	}
	// Fold in the include combinations: first overwrite matching
	// combinations, then append any entirely new ones.
	combinations.overwriteCombinations(includeCombinations)
	return combinations.addNewCombinations(includeCombinations).toParams()
}
// overwriteCombinations replaces any missing include params in the initial
// matrix params combinations by overwriting the initial combinations with the
// include combinations. Mutates the receiver's combinations in place.
func (cs Combinations) overwriteCombinations(ics Combinations) {
	for _, existing := range cs {
		for _, include := range ics {
			if existing.contains(include) {
				// Merge the include params over the matching combination.
				maps.Copy(existing, include)
			}
		}
	}
}
// addNewCombinations creates a new combination for any include parameter
// values that are missing entirely from the initial combinations and
// returns all combinations.
func (cs Combinations) addNewCombinations(ics Combinations) Combinations {
	result := cs
	for _, include := range ics {
		if !result.shouldAddNewCombination(include) {
			continue
		}
		result = append(result, include)
	}
	return result
}
// contains returns true if every include parameter that also appears in this
// combination has the same value (parameters absent from c are ignored).
func (c Combination) contains(includeCombination Combination) bool {
	for name, val := range includeCombination {
		if existing, ok := c[name]; ok && existing != val {
			return false
		}
	}
	return true
}
// shouldAddNewCombination returns true if the include parameter name exists but the value is
// missing from combinations
func (cs Combinations) shouldAddNewCombination(includeCombination map[string]string) bool {
	// An empty include contributes nothing.
	if len(includeCombination) == 0 {
		return false
	}
	for _, combination := range cs {
		for name, val := range includeCombination {
			// If any existing combination already carries this exact
			// name/value pair, no new combination is needed.
			if existing, ok := combination[name]; ok && existing == val {
				return false
			}
		}
	}
	return true
}
// toParams transforms Combinations from a slice of map[string]string to a slice of Params
// such that, these combinations can be directly consumed in creating taskRun/run object
func (cs Combinations) toParams() []Params {
	result := make([]Params, len(cs))
	for i, combination := range cs {
		// Emit params in sorted name order for a deterministic result.
		names, _ := combination.sortCombination()
		var params Params
		for _, name := range names {
			params = append(params, Param{
				Name:  name,
				Value: ParamValue{Type: ParamTypeString, StringVal: combination[name]},
			})
		}
		result[i] = params
	}
	return result
}
// fanOutMatrixParams generates new combinations based on Matrix Parameters.
func (cs Combinations) fanOutMatrixParams(param Param) Combinations {
	// The first parameter seeds the combinations; subsequent parameters
	// are distributed across the existing ones.
	if len(cs) > 0 {
		return cs.distribute(param)
	}
	return initializeCombinations(param)
}
// getIncludeCombinations generates combinations based on Matrix Include Parameters
func (m *Matrix) getIncludeCombinations() Combinations {
	var combinations Combinations
	for _, include := range m.Include {
		// Each include entry becomes one explicit combination.
		combination := make(Combination, len(include.Params))
		for _, param := range include.Params {
			combination[param.Name] = param.Value.StringVal
		}
		combinations = append(combinations, combination)
	}
	return combinations
}
// distribute generates a new Combination of Parameters by adding a new Parameter to an existing list of Combinations.
func (cs Combinations) distribute(param Param) Combinations {
	var expanded Combinations
	for _, value := range param.Value.ArrayVal {
		for _, existing := range cs {
			// Clone the existing combination before extending it so
			// sibling combinations are not mutated.
			next := make(Combination, len(existing)+1)
			maps.Copy(next, existing)
			next[param.Name] = value
			_, sorted := next.sortCombination()
			expanded = append(expanded, sorted)
		}
	}
	return expanded
}
// initializeCombinations generates a new Combination based on the first Parameter in the Matrix.
func initializeCombinations(param Param) Combinations {
	var combinations Combinations
	// One single-entry combination per value of the first parameter.
	for _, value := range param.Value.ArrayVal {
		combinations = append(combinations, Combination{param.Name: value})
	}
	return combinations
}
// sortCombination sorts the given Combination based on the Parameter names to produce a deterministic ordering.
// It returns the sorted key order alongside a Combination rebuilt in that order.
func (c Combination) sortCombination() ([]string, Combination) {
	order := make([]string, 0, len(c))
	for name := range c {
		order = append(order, name)
	}
	// The previous sort.Slice comparator used `<=`, which is not a valid
	// strict "less" function (less(a, a) must be false per the sort contract).
	// sort.Strings provides the intended lexicographic ordering correctly.
	sort.Strings(order)
	sortedCombination := make(Combination, len(c))
	for _, name := range order {
		sortedCombination[name] = c[name]
	}
	return order, sortedCombination
}
// CountCombinations returns the count of Combinations of Parameters generated from the Matrix in PipelineTask.
func (m *Matrix) CountCombinations() int {
	// Combinations produced by the Matrix Parameters cross-product, plus any
	// additional combinations introduced by Matrix Include Parameters.
	return m.countGeneratedCombinationsFromParams() + m.countNewCombinationsFromInclude()
}
// countGeneratedCombinationsFromParams returns the count of Combinations of Parameters generated from the Matrix
// Parameters
func (m *Matrix) countGeneratedCombinationsFromParams() int {
	if !m.HasParams() {
		return 0
	}
	count := 1
	for _, param := range m.Params {
		// Parameters with no values leave the running product unchanged.
		if n := len(param.Value.ArrayVal); n > 0 {
			count *= n
		}
	}
	return count
}
// countNewCombinationsFromInclude returns the count of Combinations of Parameters generated from the Matrix
// Include Parameters
func (m *Matrix) countNewCombinationsFromInclude() int {
	if !m.HasInclude() {
		return 0
	}
	// Without Matrix Parameters, every include entry is its own combination.
	if !m.HasParams() {
		return len(m.Include)
	}
	count := 0
	// Map of Matrix Parameter name -> its array of values.
	matrixParamMap := m.Params.extractParamMapArrVals()
	for _, include := range m.Include {
		for _, param := range include.Params {
			if val, exist := matrixParamMap[param.Name]; exist {
				// If the Matrix Include param values does not exist, a new Combination will be generated
				if !slices.Contains(val, param.Value.StringVal) {
					count++
				} else {
					// This include param's value is already covered by the
					// Matrix Parameters, so this include entry adds no new
					// combination — stop scanning its remaining params.
					break
				}
			}
		}
	}
	return count
}
// HasInclude returns true if the Matrix has Include Parameters.
// It is safe to call on a nil receiver.
func (m *Matrix) HasInclude() bool {
	// len of a nil slice is 0, so a separate nil check on the slice is redundant.
	return m != nil && len(m.Include) > 0
}
// HasParams returns true if the Matrix has Parameters.
// It is safe to call on a nil receiver.
func (m *Matrix) HasParams() bool {
	// len of a nil slice is 0, so a separate nil check on the slice is redundant.
	return m != nil && len(m.Params) > 0
}
// GetAllParams returns a list of all Matrix Parameters
func (m *Matrix) GetAllParams() Params {
	// Matrix Parameters first, then every include entry's Parameters.
	var all Params
	if m.HasParams() {
		all = append(all, m.Params...)
	}
	if m.HasInclude() {
		for _, include := range m.Include {
			all = append(all, include.Params...)
		}
	}
	return all
}
// validateCombinationsCount returns a FieldError when the number of
// combinations generated by this Matrix exceeds the configured maximum.
func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.FieldError) {
	count := m.CountCombinations()
	limit := config.FromContextOrDefaults(ctx).Defaults.DefaultMaxMatrixCombinationsCount
	if count > limit {
		errs = errs.Also(apis.ErrOutOfBoundsValue(count, 0, limit, "matrix"))
	}
	return errs
}
// validateUniqueParams validates Matrix.Params for a unique list of params
// and a unique list of params in each Matrix.Include.Params specification
func (m *Matrix) validateUniqueParams() (errs *apis.FieldError) {
	// A nil Matrix has nothing to validate.
	if m == nil {
		return nil
	}
	if m.HasInclude() {
		for i, include := range m.Include {
			errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i)))
		}
	}
	if m.HasParams() {
		errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params"))
	}
	return errs
}
// validatePipelineParametersVariablesInMatrixParameters validates all pipeline parameter variables including Matrix.Params and Matrix.Include.Params
// that may contain the reference(s) to other params to make sure those references are used appropriately.
func (m *Matrix) validatePipelineParametersVariablesInMatrixParameters(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	if m.HasInclude() {
		for _, include := range m.Include {
			for idx, param := range include.Params {
				stringElement := param.Value.StringVal
				// Matrix Include Params must be of type string
				errs = errs.Also(validateStringVariable(stringElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("", idx).ViaField("matrix.include.params", ""))
			}
		}
	}
	if m.HasParams() {
		for _, param := range m.Params {
			// Validate each element of the array value individually so the
			// error path pinpoints the offending index.
			for idx, arrayElement := range param.Value.ArrayVal {
				// Matrix Params must be of type array
				errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix.params", param.Name))
			}
		}
	}
	return errs
}
// validateParameterInOneOfMatrixOrParams flags any parameter that is declared
// both in the Matrix (params or includes) and in the plain params list.
func (m *Matrix) validateParameterInOneOfMatrixOrParams(params []Param) (errs *apis.FieldError) {
	matrixNames := m.GetAllParams().ExtractNames()
	for _, param := range params {
		if !matrixNames.Has(param.Name) {
			continue
		}
		errs = errs.Also(apis.ErrMultipleOneOf("matrix["+param.Name+"]", "params["+param.Name+"]"))
	}
	return errs
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"encoding/json"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/strategicpatch"
)
// mergeData is used to store the intermediate data needed to merge an object
// with a template. It's provided to avoid repeatedly re-serializing the template.
// +k8s:openapi-gen=false
type mergeData struct {
	// emptyJSON is the serialized zero value, used as the "original" in the three-way merge.
	emptyJSON []byte
	// templateJSON is the serialized template, used as the "current" in the three-way merge.
	templateJSON []byte
	// patchSchema carries the patch metadata (patchMerge tags) for the template's type.
	patchSchema strategicpatch.PatchMetaFromStruct
}
// MergeStepsWithStepTemplate takes a possibly nil container template and a
// list of steps, merging each of the steps with the container template, if
// it's not nil, and returning the resulting list.
func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, error) {
	if template == nil {
		return steps, nil
	}
	// Serialize the template once up front; the same mergeData is reused
	// for every step below.
	md, err := getMergeData(template.ToK8sContainer(), &corev1.Container{})
	if err != nil {
		return nil, err
	}
	for i, s := range steps {
		// If the stepaction has not been fetched yet then do not merge.
		// Skip over to the next one
		if s.Ref != nil {
			continue
		}
		merged := corev1.Container{}
		err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged)
		if err != nil {
			return nil, err
		}
		// If the container's args is nil, reset it to empty instead
		// (so a step that set Args explicitly does not inherit the template's).
		if merged.Args == nil && s.Args != nil {
			merged.Args = []string{}
		}
		amendConflictingContainerFields(&merged, s)
		// Pass through original step Script, for later conversion.
		// These fields are not part of corev1.Container, so they must be
		// copied from the source step explicitly.
		newStep := Step{
			Script:       s.Script,
			OnError:      s.OnError,
			Timeout:      s.Timeout,
			StdoutConfig: s.StdoutConfig,
			StderrConfig: s.StderrConfig,
			Results:      s.Results,
			Params:       s.Params,
			Ref:          s.Ref,
			When:         s.When,
			Workspaces:   s.Workspaces,
		}
		newStep.SetContainerFields(merged)
		steps[i] = newStep
	}
	return steps, nil
}
// MergeStepsWithSpecs takes a possibly nil list of overrides and a
// list of steps, merging each of the steps with the overrides' resource requirements, if
// it's not nil, and returning the resulting list.
func MergeStepsWithSpecs(steps []Step, overrides []TaskRunStepSpec) ([]Step, error) {
	// Fast path, mirroring MergeSidecarsWithSpecs: nothing to merge.
	if len(overrides) == 0 {
		return steps, nil
	}
	// Index the overrides by step name for O(1) lookup.
	stepNameToOverride := make(map[string]TaskRunStepSpec, len(overrides))
	for _, o := range overrides {
		stepNameToOverride[o.Name] = o
	}
	for i, s := range steps {
		o, found := stepNameToOverride[s.Name]
		if !found {
			continue
		}
		// Merge the override's resource requirements onto the step's.
		merged := corev1.ResourceRequirements{}
		if err := mergeObjWithTemplate(&s.ComputeResources, &o.ComputeResources, &merged); err != nil {
			return nil, err
		}
		steps[i].ComputeResources = merged
	}
	return steps, nil
}
// MergeSidecarsWithSpecs takes a possibly nil list of overrides and a
// list of sidecars, merging each of the sidecars with the overrides' resource requirements, if
// it's not nil, and returning the resulting list.
func MergeSidecarsWithSpecs(sidecars []Sidecar, overrides []TaskRunSidecarSpec) ([]Sidecar, error) {
	// Nothing to merge.
	if len(overrides) == 0 {
		return sidecars, nil
	}
	// Index the overrides by sidecar name for O(1) lookup.
	overrideByName := make(map[string]TaskRunSidecarSpec, len(overrides))
	for _, override := range overrides {
		overrideByName[override.Name] = override
	}
	for i := range sidecars {
		override, ok := overrideByName[sidecars[i].Name]
		if !ok {
			continue
		}
		var merged corev1.ResourceRequirements
		if err := mergeObjWithTemplate(&sidecars[i].ComputeResources, &override.ComputeResources, &merged); err != nil {
			return nil, err
		}
		sidecars[i].ComputeResources = merged
	}
	return sidecars, nil
}
// mergeObjWithTemplate merges obj with template and updates out to reflect the merged result.
// template, obj, and out should point to the same type. out points to the zero value of that type.
func mergeObjWithTemplate(template, obj, out interface{}) error {
	// One-shot convenience wrapper: serialize the template, then merge.
	data, err := getMergeData(template, out)
	if err != nil {
		return err
	}
	return mergeObjWithTemplateBytes(data, obj, out)
}
// getMergeData serializes the template and empty object to get the intermediate results necessary for
// merging an object of the same type with this template.
// This function is provided to avoid repeatedly serializing an identical template.
func getMergeData(template, empty interface{}) (*mergeData, error) {
	// Strategic merge patches operate on JSON, so serialize the template...
	tmplBytes, err := json.Marshal(template)
	if err != nil {
		return nil, err
	}
	// ...and an empty value, which acts as the "original" in the three-way merge.
	emptyBytes, err := json.Marshal(empty)
	if err != nil {
		return nil, err
	}
	// The patch meta drives how lists and maps are merged ("patchMerge" tags).
	schema, err := strategicpatch.NewPatchMetaFromStruct(template)
	if err != nil {
		return nil, err
	}
	return &mergeData{templateJSON: tmplBytes, emptyJSON: emptyBytes, patchSchema: schema}, nil
}
// mergeObjWithTemplateBytes merges obj with md's template JSON and updates out to reflect the merged result.
// out is a pointer to the zero value of obj's type.
// This function is provided to avoid repeatedly serializing an identical template.
func mergeObjWithTemplateBytes(md *mergeData, obj, out interface{}) error {
	objJSON, err := json.Marshal(obj)
	if err != nil {
		return err
	}
	// Three-way merge: the empty JSON is the "original", the object is the
	// "modified", and the template is the "current". This deep-merges the
	// object onto the template with awareness of the "patchMerge" tags.
	patch, err := strategicpatch.CreateThreeWayMergePatch(md.emptyJSON, objJSON, md.templateJSON, md.patchSchema, true)
	if err != nil {
		return err
	}
	// Apply the generated patch to the template JSON.
	mergedJSON, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(md.templateJSON, patch, md.patchSchema)
	if err != nil {
		return err
	}
	// Deserialize the merged JSON into the caller-supplied pointer.
	return json.Unmarshal(mergedJSON, out)
}
// amendConflictingContainerFields amends conflicting container fields after merge, and overrides conflicting fields
// by fields in step.
func amendConflictingContainerFields(container *corev1.Container, step Step) {
	if container == nil || len(step.Env) == 0 {
		return
	}
	// Index the step's env vars by name.
	stepEnvByName := make(map[string]corev1.EnvVar, len(step.Env))
	for _, envVar := range step.Env {
		stepEnvByName[envVar.Name] = envVar
	}
	for i := range container.Env {
		merged := container.Env[i]
		// Only amend entries where the merge produced both Value and
		// ValueFrom set (an invalid combination in Kubernetes).
		if merged.ValueFrom == nil || len(merged.Value) == 0 {
			continue
		}
		// Prefer the step's own definition of the conflicting env var.
		if stepEnv, ok := stepEnvByName[merged.Name]; ok {
			container.Env[i] = stepEnv
		}
	}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by openapi-gen. DO NOT EDIT.
package v1
import (
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
// GetOpenAPIDefinitions maps fully-qualified Go type names to their OpenAPI
// definitions. Generated by openapi-gen; do not edit by hand.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate":    schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template":                     schema_pkg_apis_pipeline_pod_Template(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact":                      schema_pkg_apis_pipeline_v1_Artifact(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArtifactValue":                 schema_pkg_apis_pipeline_v1_ArtifactValue(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts":                     schema_pkg_apis_pipeline_v1_Artifacts(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference":          schema_pkg_apis_pipeline_v1_ChildStatusReference(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask":                  schema_pkg_apis_pipeline_v1_EmbeddedTask(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.IncludeParams":                 schema_pkg_apis_pipeline_v1_IncludeParams(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Matrix":                        schema_pkg_apis_pipeline_v1_Matrix(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param":                         schema_pkg_apis_pipeline_v1_Param(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec":                     schema_pkg_apis_pipeline_v1_ParamSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue":                    schema_pkg_apis_pipeline_v1_ParamValue(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Pipeline":                      schema_pkg_apis_pipeline_v1_Pipeline(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineList":                  schema_pkg_apis_pipeline_v1_PipelineList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef":                   schema_pkg_apis_pipeline_v1_PipelineRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult":                schema_pkg_apis_pipeline_v1_PipelineResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRun":                   schema_pkg_apis_pipeline_v1_PipelineRun(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunList":               schema_pkg_apis_pipeline_v1_PipelineRunList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult":             schema_pkg_apis_pipeline_v1_PipelineRunResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunRunStatus":          schema_pkg_apis_pipeline_v1_PipelineRunRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunSpec":               schema_pkg_apis_pipeline_v1_PipelineRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatus":             schema_pkg_apis_pipeline_v1_PipelineRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatusFields":       schema_pkg_apis_pipeline_v1_PipelineRunStatusFields(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunTaskRunStatus":      schema_pkg_apis_pipeline_v1_PipelineRunTaskRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec":                  schema_pkg_apis_pipeline_v1_PipelineSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask":                  schema_pkg_apis_pipeline_v1_PipelineTask(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata":          schema_pkg_apis_pipeline_v1_PipelineTaskMetadata(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskParam":             schema_pkg_apis_pipeline_v1_PipelineTaskParam(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRun":               schema_pkg_apis_pipeline_v1_PipelineTaskRun(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunSpec":           schema_pkg_apis_pipeline_v1_PipelineTaskRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunTemplate":       schema_pkg_apis_pipeline_v1_PipelineTaskRunTemplate(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration":  schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec":                  schema_pkg_apis_pipeline_v1_PropertySpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance":                    schema_pkg_apis_pipeline_v1_Provenance(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Ref":                           schema_pkg_apis_pipeline_v1_Ref(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource":                     schema_pkg_apis_pipeline_v1_RefSource(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverRef":                   schema_pkg_apis_pipeline_v1_ResolverRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResultRef":                     schema_pkg_apis_pipeline_v1_ResultRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar":                       schema_pkg_apis_pipeline_v1_Sidecar(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState":                  schema_pkg_apis_pipeline_v1_SidecarState(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask":                   schema_pkg_apis_pipeline_v1_SkippedTask(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step":                          schema_pkg_apis_pipeline_v1_Step(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig":              schema_pkg_apis_pipeline_v1_StepOutputConfig(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult":                    schema_pkg_apis_pipeline_v1_StepResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState":                     schema_pkg_apis_pipeline_v1_StepState(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate":                  schema_pkg_apis_pipeline_v1_StepTemplate(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task":                          schema_pkg_apis_pipeline_v1_Task(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskBreakpoints":               schema_pkg_apis_pipeline_v1_TaskBreakpoints(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskList":                      schema_pkg_apis_pipeline_v1_TaskList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef":                       schema_pkg_apis_pipeline_v1_TaskRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult":                    schema_pkg_apis_pipeline_v1_TaskResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRun":                       schema_pkg_apis_pipeline_v1_TaskRun(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunDebug":                  schema_pkg_apis_pipeline_v1_TaskRunDebug(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunInputs":                 schema_pkg_apis_pipeline_v1_TaskRunInputs(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunList":                   schema_pkg_apis_pipeline_v1_TaskRunList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult":                 schema_pkg_apis_pipeline_v1_TaskRunResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarSpec":            schema_pkg_apis_pipeline_v1_TaskRunSidecarSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSpec":                   schema_pkg_apis_pipeline_v1_TaskRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus":                 schema_pkg_apis_pipeline_v1_TaskRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatusFields":           schema_pkg_apis_pipeline_v1_TaskRunStatusFields(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepSpec":               schema_pkg_apis_pipeline_v1_TaskRunStepSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec":                      schema_pkg_apis_pipeline_v1_TaskSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TimeoutFields":                 schema_pkg_apis_pipeline_v1_TimeoutFields(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression":                schema_pkg_apis_pipeline_v1_WhenExpression(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding":              schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration":          schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding":  schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage":                schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref),
	}
}
// schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate returns the OpenAPI
// schema for pod.AffinityAssistantTemplate. Generated by openapi-gen; do not edit.
func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "AffinityAssistantTemplate holds pod specific configuration and is a subset of the generic pod Template",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"nodeSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"tolerations": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's tolerations.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Toleration"),
									},
								},
							},
						},
					},
					"imagePullSecrets": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.LocalObjectReference"),
									},
								},
							},
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext sets the security context for the pod",
							Ref:         ref("k8s.io/api/core/v1.PodSecurityContext"),
						},
					},
					"priorityClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
	}
}
// schema_pkg_apis_pipeline_pod_Template returns the OpenAPI schema for
// pod.Template. Generated by openapi-gen; do not edit.
func schema_pkg_apis_pipeline_pod_Template(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Template holds pod specific configuration",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"nodeSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables that can be provided to the containers belonging to the pod.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"tolerations": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's tolerations.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Toleration"),
									},
								},
							},
						},
					},
					"affinity": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's scheduling constraints. See Pod.spec.affinity (API version: v1)",
							Ref:         ref("k8s.io/api/core/v1.Affinity"),
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. See Pod.spec.securityContext (API version: v1)",
							Ref:         ref("k8s.io/api/core/v1.PodSecurityContext"),
						},
					},
					"volumes": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge,retainKeys",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes See Pod.spec.volumes (API version: v1)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Volume"),
									},
								},
							},
						},
					},
					"runtimeClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"automountServiceAccountToken": {
						SchemaProps: spec.SchemaProps{
							Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"dnsPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dnsConfig": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
							Ref:         ref("k8s.io/api/core/v1.PodDNSConfig"),
						},
					},
					"enableServiceLinks": {
						SchemaProps: spec.SchemaProps{
							Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"priorityClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"schedulerName": {
						SchemaProps: spec.SchemaProps{
							Description: "SchedulerName specifies the scheduler to be used to dispatch the Pod",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"imagePullSecrets": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.LocalObjectReference"),
									},
								},
							},
						},
					},
					"hostAliases": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.HostAlias"),
									},
								},
							},
						},
					},
					"hostNetwork": {
						SchemaProps: spec.SchemaProps{
							Description: "HostNetwork specifies whether the pod may use the node network namespace",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"topologySpreadConstraints": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "TopologySpreadConstraints controls how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume"},
	}
}
// schema_pkg_apis_pipeline_v1_Artifact returns the OpenAPI v2 definition for
// pipeline/v1.Artifact. Generated by openapi-gen from the Go type's struct
// tags and doc comments — do not hand-edit; regenerate instead.
func schema_pkg_apis_pipeline_v1_Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskRunStepArtifact represents an artifact produced or used by a step within a task run. It directly uses the Artifact type for its structure.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "The artifact's identifying category name",
Type: []string{"string"},
Format: "",
},
},
"values": {
SchemaProps: spec.SchemaProps{
Description: "A collection of values related to the artifact",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
// Default of an empty map marks a struct-typed element default.
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArtifactValue"),
},
},
},
},
},
"buildOutput": {
SchemaProps: spec.SchemaProps{
Description: "Indicate if the artifact is a build output or a by-product",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
// Dependencies lists every ref()'d definition so consumers can resolve it.
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArtifactValue"},
}
}
// schema_pkg_apis_pipeline_v1_ArtifactValue returns the OpenAPI v2 definition
// for pipeline/v1.ArtifactValue. Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_ArtifactValue(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ArtifactValue represents a specific value or data element within an Artifact.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"digest": {
SchemaProps: spec.SchemaProps{
// Free-form map of string -> string (algorithm -> digest).
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
"uri": {
SchemaProps: spec.SchemaProps{
// NOTE(review): this description reads like it belongs to "digest",
// not "uri" — likely a misplaced doc comment on the source Go type.
// Fix upstream and regenerate; do not edit here.
Description: "Algorithm-specific digests for verifying the content (e.g., SHA256)",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1_Artifacts returns the OpenAPI v2 definition for
// pipeline/v1.Artifacts (the inputs/outputs artifact collections of a task
// run). Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_Artifacts(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Artifacts represents the collection of input and output artifacts associated with a task run or a similar process. Artifacts in this context are units of data or resources that the process either consumes as input or produces as output.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"inputs": {
// "atomic" tells the k8s API machinery to replace, not merge, the list.
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact"),
},
},
},
},
},
"outputs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact"},
}
}
// schema_pkg_apis_pipeline_v1_ChildStatusReference returns the OpenAPI v2
// definition for pipeline/v1.ChildStatusReference, which points a PipelineRun
// status at its child TaskRuns/Runs. Generated by openapi-gen — do not
// hand-edit.
func schema_pkg_apis_pipeline_v1_ChildStatusReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
// apiVersion/kind come from the embedded TypeMeta of the referenced object.
"apiVersion": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of the TaskRun or Run this is referencing.",
Type: []string{"string"},
Format: "",
},
},
"displayName": {
SchemaProps: spec.SchemaProps{
Description: "DisplayName is a user-facing name of the pipelineTask that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"pipelineTaskName": {
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskName is the name of the PipelineTask this is referencing.",
Type: []string{"string"},
Format: "",
},
},
"whenExpressions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"},
}
}
// schema_pkg_apis_pipeline_v1_EmbeddedTask returns the OpenAPI v2 definition
// for pipeline/v1.EmbeddedTask — an inline Task spec inside a Pipeline's
// PipelineTasks, combining TypeMeta-like fields, a raw custom-task spec, and
// the full TaskSpec fields. Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_EmbeddedTask(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiVersion": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
// RawExtension: the custom task's spec is passed through unvalidated.
Description: "Spec is a specification of a custom task",
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata"),
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"),
},
},
},
},
},
"displayName": {
SchemaProps: spec.SchemaProps{
Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the task that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"steps": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step"),
},
},
},
},
},
"volumes": {
SchemaProps: spec.SchemaProps{
Description: "Volumes is a collection of volumes that are available to mount into the steps of the build. See Pod.spec.volumes (API version: v1)",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/core/v1.Volume"),
},
},
},
},
},
"stepTemplate": {
SchemaProps: spec.SchemaProps{
Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate"),
},
},
"sidecars": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar"),
},
},
},
},
},
"workspaces": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Workspaces are the volumes that this Task requires.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration"),
},
},
},
},
},
"results": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Results are values that this Task can output",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
}
}
// schema_pkg_apis_pipeline_v1_IncludeParams returns the OpenAPI v2 definition
// for pipeline/v1.IncludeParams (named parameter combinations added to a
// Matrix). Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_IncludeParams(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "IncludeParams allows passing in a specific combinations of Parameters into the Matrix.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name the specified combination",
Type: []string{"string"},
Format: "",
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params takes only `Parameters` of type `\"string\"` The names of the `params` must match the names of the `params` in the underlying `Task`",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"},
}
}
// schema_pkg_apis_pipeline_v1_Matrix returns the OpenAPI v2 definition for
// pipeline/v1.Matrix (fan-out of a PipelineTask over parameter combinations).
// Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_Matrix(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Matrix is used to fan out Tasks in a Pipeline",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
},
},
},
},
},
"include": {
SchemaProps: spec.SchemaProps{
Description: "Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.IncludeParams"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.IncludeParams", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"},
}
}
// schema_pkg_apis_pipeline_v1_Param returns the OpenAPI v2 definition for
// pipeline/v1.Param (a name/value pair supplied at runtime). Both fields are
// required. Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Param declares an ParamValues to use for the parameter called name.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
},
Required: []string{"name", "value"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"},
}
}
// schema_pkg_apis_pipeline_v1_ParamSpec returns the OpenAPI v2 definition for
// pipeline/v1.ParamSpec (a parameter declaration with type, description,
// object properties, default, and enum constraints). Only "name" is required.
// Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_ParamSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name declares the name by which a parameter is referenced.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the parameter that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"properties": {
SchemaProps: spec.SchemaProps{
Description: "Properties is the JSON Schema properties to support key-value pairs parameter.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"),
},
},
},
},
},
"default": {
SchemaProps: spec.SchemaProps{
Description: "Default is the value a parameter takes if no input value is supplied. If default is set, a Task may be executed without a supplied value for the parameter.",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
"enum": {
SchemaProps: spec.SchemaProps{
Description: "Enum declares a set of allowed param input values for tasks/pipelines that can be validated. If Enum is not set, no input validation is performed for the param.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"},
}
}
// schema_pkg_apis_pipeline_v1_ParamValue returns the OpenAPI v2 definition
// for pipeline/v1.ParamValue, the string/array/object union type used for
// param and result values. Field names are exported Go names ("Type",
// "StringVal", ...) because the type uses custom JSON marshalling rather than
// json tags. Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_ParamValue(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResultValue is a type alias of ParamValue",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"Type": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"StringVal": {
SchemaProps: spec.SchemaProps{
Description: "Represents the stored type of ParamValues.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"ArrayVal": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
"ObjectVal": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"Type", "StringVal", "ArrayVal", "ObjectVal"},
},
},
}
}
// schema_pkg_apis_pipeline_v1_Pipeline returns the OpenAPI v2 definition for
// the pipeline/v1.Pipeline top-level resource (TypeMeta + ObjectMeta + spec).
// Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_Pipeline(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the Pipeline from the client",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineList returns the OpenAPI v2 definition
// for pipeline/v1.PipelineList (standard k8s list wrapper; "items" required).
// Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineList contains a list of Pipeline",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Pipeline"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Pipeline", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRef returns the OpenAPI v2 definition
// for pipeline/v1.PipelineRef (a by-name reference to a Pipeline). No refs to
// other definitions, hence no Dependencies. Generated by openapi-gen — do not
// hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRef can be used to refer to a specific instance of a Pipeline.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "API version of the referent",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1_PipelineResult returns the OpenAPI v2
// definition for pipeline/v1.PipelineResult (a result declared by a Pipeline;
// "name" and "value" required). Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineResult used to describe the results of a pipeline",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name the given name",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the user-specified type of the result. The possible types are 'string', 'array', and 'object', with 'string' as the default. 'array' and 'object' types are alpha features.",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a human-readable description of the result",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value the expression used to retrieve the value",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
},
Required: []string{"name", "value"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRun returns the OpenAPI v2 definition
// for the pipeline/v1.PipelineRun top-level resource (TypeMeta + ObjectMeta +
// spec + status). Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineRun(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRunList returns the OpenAPI v2
// definition for pipeline/v1.PipelineRunList. NOTE(review): unlike
// PipelineList, "items" is not in a Required list here — mirrors the
// generated output; confirm against the source type before relying on it.
// Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineRunList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRunList contains a list of PipelineRun",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRun"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRunResult returns the OpenAPI v2
// definition for pipeline/v1.PipelineRunResult (a result produced by an
// executed PipelineRun; "name" and "value" required). Generated by
// openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRunResult used to describe the results of a pipeline",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the result's name as declared by the Pipeline",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value is the result returned from the execution of this PipelineRun",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
},
Required: []string{"name", "value"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRunRunStatus returns the OpenAPI v2
// definition for pipeline/v1.PipelineRunRunStatus (per-PipelineTask status of
// a custom Run child). Generated by openapi-gen — do not hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineRunRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRunRunStatus contains the name of the PipelineTask for this Run and the Run's Status",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"pipelineTaskName": {
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskName is the name of the PipelineTask.",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status is the RunStatus for the corresponding Run",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus"),
},
},
"whenExpressions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRunSpec returns the OpenAPI v2
// definition for pipeline/v1.PipelineRunSpec (the user-supplied desired state
// of a PipelineRun: pipeline reference or inline spec, params, timeouts,
// workspaces, and per-task runtime specs). Generated by openapi-gen — do not
// hand-edit.
func schema_pkg_apis_pipeline_v1_PipelineRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRunSpec defines the desired state of PipelineRun",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"pipelineRef": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef"),
},
},
"pipelineSpec": {
SchemaProps: spec.SchemaProps{
Description: "Specifying PipelineSpec can be disabled by setting `disable-inline-spec` feature flag. See Pipeline.spec (API version: tekton.dev/v1)",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"),
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params is a list of parameter names and values.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
},
},
},
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Used for cancelling a pipelinerun (and maybe more later on)",
Type: []string{"string"},
Format: "",
},
},
"timeouts": {
SchemaProps: spec.SchemaProps{
Description: "Time after which the Pipeline times out. Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TimeoutFields"),
},
},
"taskRunTemplate": {
SchemaProps: spec.SchemaProps{
Description: "TaskRunTemplate represent template of taskrun",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunTemplate"),
},
},
"workspaces": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Workspaces holds a set of workspace bindings that must match names with those declared in the pipeline.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding"),
},
},
},
},
},
"taskRunSpecs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "TaskRunSpecs holds a set of runtime specs",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunSpec"),
},
},
},
},
},
"managedBy": {
SchemaProps: spec.SchemaProps{
Description: "ManagedBy indicates which controller is responsible for reconciling this resource. If unset or set to \"tekton.dev/pipeline\", the default Tekton controller will manage this resource. This field is immutable.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TimeoutFields", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRunStatus returns the OpenAPI definition for the
// v1 PipelineRunStatus type. Each entry in Properties mirrors a JSON field of the type;
// ref resolves references to other definitions, and every type passed to ref is also
// listed in Dependencies so the definition aggregator can link them.
// NOTE(review): this looks like openapi-gen generated output — regenerate rather than
// hand-edit; confirm against the file header.
func schema_pkg_apis_pipeline_v1_PipelineRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRunStatus defines the observed state of PipelineRun",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"observedGeneration": {
SchemaProps: spec.SchemaProps{
Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.",
Type:        []string{"integer"},
Format:      "int64",
},
},
// conditions carries patch-merge extensions so strategic-merge patches
// merge entries by their "type" key instead of replacing the whole list.
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Conditions the latest available observations of a resource's current state.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("knative.dev/pkg/apis.Condition"),
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.",
Type:        []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"startTime": {
SchemaProps: spec.SchemaProps{
Description: "StartTime is the time the PipelineRun is actually started.",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"completionTime": {
SchemaProps: spec.SchemaProps{
Description: "CompletionTime is the time the PipelineRun completed.",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"results": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Results are the list of results written out by the pipeline task's containers",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult"),
},
},
},
},
},
"pipelineSpec": {
SchemaProps: spec.SchemaProps{
Description: "PipelineSpec contains the exact spec used to instantiate the run. See Pipeline.spec (API version: tekton.dev/v1)",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"),
},
},
"skippedTasks": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "list of tasks that were skipped due to when expressions evaluating to false",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask"),
},
},
},
},
},
"childReferences": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference"),
},
},
},
},
},
"finallyStartTime": {
SchemaProps: spec.SchemaProps{
Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"provenance": {
SchemaProps: spec.SchemaProps{
Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"),
},
},
"spanContext": {
SchemaProps: spec.SchemaProps{
Description: "SpanContext contains tracing span context fields",
Type:        []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
},
},
},
// Dependencies enumerates every definition referenced through ref above.
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRunStatusFields returns the OpenAPI definition
// for the v1 PipelineRunStatusFields type — the duck-typed subset of PipelineRunStatus
// (same properties minus the knative Status fields such as conditions/annotations).
// ref resolves cross-type references; every type passed to ref is also listed in
// Dependencies.
func schema_pkg_apis_pipeline_v1_PipelineRunStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRunStatusFields holds the fields of PipelineRunStatus' status. This is defined separately and inlined so that other types can readily consume these fields via duck typing.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"startTime": {
SchemaProps: spec.SchemaProps{
Description: "StartTime is the time the PipelineRun is actually started.",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"completionTime": {
SchemaProps: spec.SchemaProps{
Description: "CompletionTime is the time the PipelineRun completed.",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"results": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Results are the list of results written out by the pipeline task's containers",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult"),
},
},
},
},
},
"pipelineSpec": {
SchemaProps: spec.SchemaProps{
Description: "PipelineSpec contains the exact spec used to instantiate the run. See Pipeline.spec (API version: tekton.dev/v1)",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"),
},
},
"skippedTasks": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "list of tasks that were skipped due to when expressions evaluating to false",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask"),
},
},
},
},
},
"childReferences": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference"),
},
},
},
},
},
"finallyStartTime": {
SchemaProps: spec.SchemaProps{
Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"provenance": {
SchemaProps: spec.SchemaProps{
Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"),
},
},
"spanContext": {
SchemaProps: spec.SchemaProps{
Description: "SpanContext contains tracing span context fields",
Type:        []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
},
},
},
// Dependencies enumerates every definition referenced through ref above.
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineRunTaskRunStatus returns the OpenAPI definition
// for the v1 PipelineRunTaskRunStatus type: the PipelineTask name, the TaskRun's
// status, and the when-expressions that guarded its execution. Types passed to ref are
// also listed in Dependencies.
func schema_pkg_apis_pipeline_v1_PipelineRunTaskRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"pipelineTaskName": {
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskName is the name of the PipelineTask.",
Type:        []string{"string"},
Format:      "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status is the TaskRunStatus for the corresponding TaskRun",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"),
},
},
"whenExpressions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineSpec returns the OpenAPI definition for the v1
// PipelineSpec type: display/description strings plus the tasks, params, workspaces,
// results and finally lists. Array-valued properties (except "params", which carries
// no list-type extension here) are marked x-kubernetes-list-type: atomic. Types passed
// to ref are also listed in Dependencies.
func schema_pkg_apis_pipeline_v1_PipelineSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineSpec defines the desired state of Pipeline.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"displayName": {
SchemaProps: spec.SchemaProps{
Description: "DisplayName is a user-facing name of the pipeline that may be used to populate a UI.",
Type:        []string{"string"},
Format:      "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the pipeline that may be used to populate a UI.",
Type:        []string{"string"},
Format:      "",
},
},
"tasks": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Tasks declares the graph of Tasks that execute when this Pipeline is run.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask"),
},
},
},
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params declares a list of input parameters that must be supplied when this Pipeline is run.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"),
},
},
},
},
},
"workspaces": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Workspaces declares a set of named workspaces that are expected to be provided by a PipelineRun.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration"),
},
},
},
},
},
"results": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Results are values that this pipeline can output once run",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult"),
},
},
},
},
},
"finally": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Finally declares the list of Tasks that execute just before leaving the Pipeline i.e. either after all Tasks are finished executing successfully or after a failure which would result in ending the Pipeline",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineTask returns the OpenAPI definition for the v1
// PipelineTask type: one node in a Pipeline's task graph, referencing either a Task
// (taskRef/taskSpec) or — in preview mode per the descriptions below — a Pipeline
// (pipelineRef/pipelineSpec), plus ordering (runAfter), inputs (params/matrix/
// workspaces), guards (when), retries, timeout and onError. Types passed to ref are
// also listed in Dependencies.
func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTask defines a task in a Pipeline, passing inputs from both Params and from the output of previous tasks.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of this task within the context of a Pipeline. Name is used as a coordinate with the `from` and `runAfter` fields to establish the execution order of tasks relative to one another.",
Type:        []string{"string"},
Format:      "",
},
},
"displayName": {
SchemaProps: spec.SchemaProps{
Description: "DisplayName is the display name of this task within the context of a Pipeline. This display name may be used to populate a UI.",
Type:        []string{"string"},
Format:      "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is the description of this task within the context of a Pipeline. This description may be used to populate a UI.",
Type:        []string{"string"},
Format:      "",
},
},
"taskRef": {
SchemaProps: spec.SchemaProps{
Description: "TaskRef is a reference to a task definition.",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef"),
},
},
"taskSpec": {
SchemaProps: spec.SchemaProps{
Description: "TaskSpec is a specification of a task Specifying TaskSpec can be disabled by setting `disable-inline-spec` feature flag. See Task.spec (API version: tekton.dev/v1)",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask"),
},
},
"when": {
SchemaProps: spec.SchemaProps{
Description: "When is a list of when expressions that need to be true for the task to run",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"),
},
},
},
},
},
"retries": {
SchemaProps: spec.SchemaProps{
Description: "Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False",
Type:        []string{"integer"},
Format:      "int32",
},
},
"runAfter": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "RunAfter is the list of PipelineTask names that should be executed before this Task executes. (Used to force a specific ordering in graph execution.)",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Parameters declares parameters passed to this task.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
},
},
},
},
},
"matrix": {
SchemaProps: spec.SchemaProps{
Description: "Matrix declares parameters used to fan out this task.",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Matrix"),
},
},
"workspaces": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Workspaces maps workspaces from the pipeline spec to the workspaces declared in the Task.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding"),
},
},
},
},
},
"timeout": {
SchemaProps: spec.SchemaProps{
Description: "Duration after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
},
},
"pipelineRef": {
SchemaProps: spec.SchemaProps{
Description: "PipelineRef is a reference to a pipeline definition Note: PipelineRef is in preview mode and not yet supported",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef"),
},
},
"pipelineSpec": {
SchemaProps: spec.SchemaProps{
Description: "PipelineSpec is a specification of a pipeline Note: PipelineSpec is in preview mode and not yet supported Specifying PipelineSpec can be disabled by setting `disable-inline-spec` feature flag. See Pipeline.spec (API version: tekton.dev/v1)",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"),
},
},
"onError": {
SchemaProps: spec.SchemaProps{
Description: "OnError defines the exiting behavior of a PipelineRun on error can be set to [ continue | stopAndFail ]",
Type:        []string{"string"},
Format:      "",
},
},
},
},
},
// Dependencies enumerates every definition referenced through ref above.
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Matrix", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineTaskMetadata returns the OpenAPI definition for
// the v1 PipelineTaskMetadata type: two string-to-string maps ("labels" and
// "annotations"). It references no other definitions, so there is no Dependencies list.
func schema_pkg_apis_pipeline_v1_PipelineTaskMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"labels": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1_PipelineTaskParam returns the OpenAPI definition for the
// v1 PipelineTaskParam type: a required name/value string pair.
func schema_pkg_apis_pipeline_v1_PipelineTaskParam(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskParam is used to provide arbitrary string parameters to a Task.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
// Both fields must be present in serialized objects.
Required: []string{"name", "value"},
},
},
}
}
// schema_pkg_apis_pipeline_v1_PipelineTaskRun returns the OpenAPI definition for the
// v1 PipelineTaskRun type, which exposes only an optional "name" string.
// NOTE(review): the Description below (copied from the Go type's doc comment) talks
// about step results/logs, which the single-field schema does not reflect — confirm
// against the source type.
func schema_pkg_apis_pipeline_v1_PipelineTaskRun(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskRun reports the results of running a step in the Task. Each task has the potential to succeed or fail (based on the exit code) and produces logs.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1_PipelineTaskRunSpec returns the OpenAPI definition for
// the v1 PipelineTaskRunSpec type: per-PipelineTask runtime overrides (service
// account, pod template, step/sidecar specs, metadata, compute resources, timeout)
// keyed by pipelineTaskName. Types passed to ref are also listed in Dependencies.
func schema_pkg_apis_pipeline_v1_PipelineTaskRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskRunSpec can be used to configure specific specs for a concrete Task",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"pipelineTaskName": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
"podTemplate": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"),
},
},
"stepSpecs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepSpec"),
},
},
},
},
},
"sidecarSpecs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarSpec"),
},
},
},
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata"),
},
},
"computeResources": {
SchemaProps: spec.SchemaProps{
Description: "Compute resources to use for this TaskRun",
Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"timeout": {
SchemaProps: spec.SchemaProps{
Description: "Duration after which the TaskRun times out. Overrides the timeout specified on the Task's spec if specified. Takes lower precedence to PipelineRun's `spec.timeouts.tasks` Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepSpec", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineTaskRunTemplate returns the OpenAPI definition
// for the v1 PipelineTaskRunTemplate type: the pod template and service account name
// applied to every TaskRun of a PipelineRun.
func schema_pkg_apis_pipeline_v1_PipelineTaskRunTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskRunTemplate is used to specify run specifications for all Task in pipelinerun.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"podTemplate": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"),
},
},
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"},
}
}
// schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration returns the OpenAPI
// definition for the v1 PipelineWorkspaceDeclaration type: a named workspace slot
// (name required) with an optional description and optional flag.
func schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding.\n\nDeprecated: use PipelineWorkspaceDeclaration type instead",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of a workspace to be provided by a PipelineRun.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.",
Type:        []string{"string"},
Format:      "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Optional marks a Workspace as not being required in PipelineRuns. By default this field is false and so declared workspaces are required.",
Type:        []string{"boolean"},
Format:      "",
},
},
},
Required: []string{"name"},
},
},
}
}
// schema_pkg_apis_pipeline_v1_PropertySpec returns the OpenAPI definition for the v1
// PropertySpec type: a single optional "type" string describing an object key.
func schema_pkg_apis_pipeline_v1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PropertySpec defines the struct for object keys",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1_Provenance returns the OpenAPI definition for the v1
// Provenance type: the RefSource of a remote definition plus the FeatureFlags in
// effect during the run. Types passed to ref are also listed in Dependencies.
func schema_pkg_apis_pipeline_v1_Provenance(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Provenance contains metadata about resources used in the TaskRun/PipelineRun such as the source from where a remote build definition was fetched. This field aims to carry minimum amoumt of metadata in *Run status so that Tekton Chains can capture them in the provenance.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"refSource": {
SchemaProps: spec.SchemaProps{
Description: "RefSource identifies the source where a remote task/pipeline came from.",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"),
},
},
"featureFlags": {
SchemaProps: spec.SchemaProps{
Description: "FeatureFlags identifies the feature flags that were used during the task/pipeline run",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"},
}
}
// schema_pkg_apis_pipeline_v1_Ref returns the OpenAPI definition for the v1 Ref type:
// a reference to a StepAction by name.
func schema_pkg_apis_pipeline_v1_Ref(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Ref can be used to refer to a specific instance of a StepAction.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referenced step",
Type:        []string{"string"},
Format:      "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1_RefSource returns the OpenAPI definition for the v1
// RefSource type: the uri, digest map (algorithm -> hex digest), and entryPoint that
// uniquely identify where a remote build definition came from.
func schema_pkg_apis_pipeline_v1_RefSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RefSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"uri": {
SchemaProps: spec.SchemaProps{
Description: "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"",
Type:        []string{"string"},
Format:      "",
},
},
"digest": {
SchemaProps: spec.SchemaProps{
Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}",
Type:        []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"entryPoint": {
SchemaProps: spec.SchemaProps{
Description: "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.10/git-clone.yaml\"",
Type:        []string{"string"},
Format:      "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1_ResolverRef returns the OpenAPI definition for the v1
// ResolverRef type: a resolver name plus the resolver-specific params that identify
// the remote Tekton resource. The Param type passed to ref is listed in Dependencies.
func schema_pkg_apis_pipeline_v1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in beta and these fields are only available when the beta feature gate is enabled.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"resolver": {
SchemaProps: spec.SchemaProps{
Description: "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".",
Type:        []string{"string"},
Format:      "",
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"},
}
}
// schema_pkg_apis_pipeline_v1_ResultRef returns the OpenAPI definition for the v1
// ResultRef type: the coordinates of a task-run result (pipelineTask, result,
// resultsIndex, property) — all four fields are listed as required here.
func schema_pkg_apis_pipeline_v1_ResultRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResultRef is a type that represents a reference to a task run result",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"pipelineTask": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
"result": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
"resultsIndex": {
SchemaProps: spec.SchemaProps{
Type:   []string{"integer"},
Format: "int32",
},
},
"property": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
Required: []string{"pipelineTask", "result", "resultsIndex", "property"},
},
},
}
}
// schema_pkg_apis_pipeline_v1_Sidecar returns the OpenAPI definition for the
// v1 Sidecar type. The ref callback resolves references to other schema
// definitions; every type passed to ref is also listed in Dependencies.
// NOTE(review): this looks like generator-produced code (openapi-gen style) —
// manual edits would presumably be overwritten on regeneration; confirm.
func schema_pkg_apis_pipeline_v1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					// "name" is the only required property (see Required below).
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the Sidecar specified as a DNS_LABEL. Each Sidecar in a Task must have a unique name (DNS_LABEL). Cannot be updated.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"image": {
						SchemaProps: spec.SchemaProps{
							Description: "Image reference name. More info: https://kubernetes.io/docs/concepts/containers/images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// command/args carry the x-kubernetes-list-type extension so
					// server-side apply treats the whole list as one atomic value.
					"command": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"args": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"workingDir": {
						SchemaProps: spec.SchemaProps{
							Description: "Sidecar's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// "ports" is a map-typed list keyed by (containerPort, protocol)
					// with strategic-merge-patch merging on containerPort.
					"ports": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-map-keys": []interface{}{
									"containerPort",
									"protocol",
								},
								"x-kubernetes-list-type":       "map",
								"x-kubernetes-patch-merge-key": "containerPort",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of ports to expose from the Sidecar. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.ContainerPort"),
									},
								},
							},
						},
					},
					"envFrom": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of sources to populate environment variables in the Sidecar. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvFromSource"),
									},
								},
							},
						},
					},
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables to set in the Sidecar. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"computeResources": {
						SchemaProps: spec.SchemaProps{
							Description: "ComputeResources required by this Sidecar. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
						},
					},
					"volumeMounts": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "mountPath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Volumes to mount into the Sidecar's filesystem. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeMount"),
									},
								},
							},
						},
					},
					"volumeDevices": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "devicePath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "volumeDevices is the list of block devices to be used by the Sidecar.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeDevice"),
									},
								},
							},
						},
					},
					// The three probes and lifecycle are plain references to core/v1 types.
					"livenessProbe": {
						SchemaProps: spec.SchemaProps{
							Description: "Periodic probe of Sidecar liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
							Ref:         ref("k8s.io/api/core/v1.Probe"),
						},
					},
					"readinessProbe": {
						SchemaProps: spec.SchemaProps{
							Description: "Periodic probe of Sidecar service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
							Ref:         ref("k8s.io/api/core/v1.Probe"),
						},
					},
					"startupProbe": {
						SchemaProps: spec.SchemaProps{
							Description: "StartupProbe indicates that the Pod the Sidecar is running in has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
							Ref:         ref("k8s.io/api/core/v1.Probe"),
						},
					},
					"lifecycle": {
						SchemaProps: spec.SchemaProps{
							Description: "Actions that the management system should take in response to Sidecar lifecycle events. Cannot be updated.",
							Ref:         ref("k8s.io/api/core/v1.Lifecycle"),
						},
					},
					"terminationMessagePath": {
						SchemaProps: spec.SchemaProps{
							Description: "Optional: Path at which the file to which the Sidecar's termination message will be written is mounted into the Sidecar's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"terminationMessagePolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the Sidecar status message on both success and failure. FallbackToLogsOnError will use the last chunk of Sidecar log output if the termination message file is empty and the Sidecar exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"imagePullPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext defines the security options the Sidecar should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
							Ref:         ref("k8s.io/api/core/v1.SecurityContext"),
						},
					},
					"stdin": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether this Sidecar should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Sidecar will always result in EOF. Default is false.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"stdinOnce": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on Sidecar start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the Sidecar is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"tty": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether this Sidecar should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					// Tekton-specific extensions beyond the core container fields.
					"script": {
						SchemaProps: spec.SchemaProps{
							Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"),
									},
								},
							},
						},
					},
					"restartPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an initContainer and must have it's policy set to \"Always\". It is currently left optional to help support Kubernetes versions prior to 1.29 when this feature was introduced.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
	}
}
// schema_pkg_apis_pipeline_v1_SidecarState builds the OpenAPI schema for the
// v1 SidecarState type. The ref callback resolves references to the core/v1
// container-state types, which are repeated in Dependencies.
func schema_pkg_apis_pipeline_v1_SidecarState(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// stateRef builds the schema for one of the container-state detail fields.
	stateRef := func(desc, target string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Ref:         ref(target),
			},
		}
	}
	// plainString is the shared shape of the undocumented string fields.
	plainString := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Type:   []string{"string"},
			Format: "",
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "SidecarState reports the results of running a sidecar in a Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"waiting":    stateRef("Details about a waiting container", "k8s.io/api/core/v1.ContainerStateWaiting"),
					"running":    stateRef("Details about a running container", "k8s.io/api/core/v1.ContainerStateRunning"),
					"terminated": stateRef("Details about a terminated container", "k8s.io/api/core/v1.ContainerStateTerminated"),
					"name":       plainString,
					"container":  plainString,
					"imageID":    plainString,
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
	}
}
// schema_pkg_apis_pipeline_v1_SkippedTask builds the OpenAPI schema for the
// v1 SkippedTask type.
func schema_pkg_apis_pipeline_v1_SkippedTask(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// stringField builds a defaulted, required string property.
	stringField := func(desc string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Default:     "",
				Type:        []string{"string"},
				Format:      "",
			},
		}
	}
	// whenExpressions is an atomic list of v1.WhenExpression references.
	whenExpressions := spec.Schema{
		VendorExtensible: spec.VendorExtensible{
			Extensions: spec.Extensions{
				"x-kubernetes-list-type": "atomic",
			},
		},
		SchemaProps: spec.SchemaProps{
			Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
			Type:        []string{"array"},
			Items: &spec.SchemaOrArray{
				Schema: &spec.Schema{
					SchemaProps: spec.SchemaProps{
						Default: map[string]interface{}{},
						Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"),
					},
				},
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "SkippedTask is used to describe the Tasks that were skipped due to their When Expressions evaluating to False. This is a struct because we are looking into including more details about the When Expressions that caused this Task to be skipped.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name":            stringField("Name is the Pipeline Task name"),
					"reason":          stringField("Reason is the cause of the PipelineTask being skipped."),
					"whenExpressions": whenExpressions,
				},
				Required: []string{"name", "reason"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"},
	}
}
// schema_pkg_apis_pipeline_v1_Step returns the OpenAPI definition for the
// v1 Step type. The ref callback resolves references to other schema
// definitions; every type passed to ref is also listed in Dependencies.
// NOTE(review): this looks like generator-produced code (openapi-gen style) —
// manual edits would presumably be overwritten on regeneration; confirm.
func schema_pkg_apis_pipeline_v1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Step runs a subcomponent of a Task",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					// "name" is the only required property (see Required below).
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the Step specified as a DNS_LABEL. Each Step in a Task must have a unique name.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"displayName": {
						SchemaProps: spec.SchemaProps{
							Description: "DisplayName is a user-facing name of the step that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"image": {
						SchemaProps: spec.SchemaProps{
							Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// command/args carry the x-kubernetes-list-type extension so
					// server-side apply treats the whole list as one atomic value.
					"command": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"args": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"workingDir": {
						SchemaProps: spec.SchemaProps{
							Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"envFrom": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of sources to populate environment variables in the Step. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the Step is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvFromSource"),
									},
								},
							},
						},
					},
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables to set in the Step. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"computeResources": {
						SchemaProps: spec.SchemaProps{
							Description: "ComputeResources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
						},
					},
					"volumeMounts": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "mountPath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Volumes to mount into the Step's filesystem. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeMount"),
									},
								},
							},
						},
					},
					"volumeDevices": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "devicePath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "volumeDevices is the list of block devices to be used by the Step.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeDevice"),
									},
								},
							},
						},
					},
					"imagePullPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
							Ref:         ref("k8s.io/api/core/v1.SecurityContext"),
						},
					},
					// Tekton-specific extensions beyond the core container fields.
					"script": {
						SchemaProps: spec.SchemaProps{
							Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"timeout": {
						SchemaProps: spec.SchemaProps{
							Description: "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"),
									},
								},
							},
						},
					},
					"onError": {
						SchemaProps: spec.SchemaProps{
							Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ]",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"stdoutConfig": {
						SchemaProps: spec.SchemaProps{
							Description: "Stores configuration for the stdout stream of the step.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"),
						},
					},
					"stderrConfig": {
						SchemaProps: spec.SchemaProps{
							Description: "Stores configuration for the stderr stream of the step.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"),
						},
					},
					// StepAction support: reference plus parameters and results.
					"ref": {
						SchemaProps: spec.SchemaProps{
							Description: "Contains the reference to an existing StepAction.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Ref"),
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Params declares parameters passed to this step action.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
									},
								},
							},
						},
					},
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results declares StepResults produced by the Step.\n\nIt can be used in an inlined Step when used to store Results to $(step.results.resultName.path). It cannot be used when referencing StepActions using [v1.Step.Ref]. The Results declared by the StepActions will be stored here instead.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult"),
									},
								},
							},
						},
					},
					"when": {
						SchemaProps: spec.SchemaProps{
							Description: "When is a list of when expressions that need to be true for the task to run",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"),
									},
								},
							},
						},
					},
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Ref", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1_StepOutputConfig builds the OpenAPI schema for
// the v1 StepOutputConfig type. The ref callback is unused because the type
// only contains a scalar field.
func schema_pkg_apis_pipeline_v1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
	pathSchema := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "Path to duplicate stdout stream to on container's local filesystem.",
			Type:        []string{"string"},
			Format:      "",
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepOutputConfig stores configuration for a step output stream.",
				Type:        []string{"object"},
				Properties:  map[string]spec.Schema{"path": pathSchema},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1_StepResult builds the OpenAPI schema for the
// v1 StepResult type. The ref callback resolves the v1.PropertySpec reference
// used by the "properties" field.
func schema_pkg_apis_pipeline_v1_StepResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
	props := map[string]spec.Schema{
		"name": {
			SchemaProps: spec.SchemaProps{
				Description: "Name the given name",
				Default:     "",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"type": {
			SchemaProps: spec.SchemaProps{
				Description: "The possible types are 'string', 'array', and 'object', with 'string' as the default.",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		// Free-form map of result keys to v1.PropertySpec values.
		"properties": {
			SchemaProps: spec.SchemaProps{
				Description: "Properties is the JSON Schema properties to support key-value pairs results.",
				Type:        []string{"object"},
				AdditionalProperties: &spec.SchemaOrBool{
					Allows: true,
					Schema: &spec.Schema{
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"),
						},
					},
				},
			},
		},
		"description": {
			SchemaProps: spec.SchemaProps{
				Description: "Description is a human-readable description of the result",
				Type:        []string{"string"},
				Format:      "",
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepResult used to describe the Results of a Step.",
				Type:        []string{"object"},
				Properties:  props,
				Required:    []string{"name"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"},
	}
}
// schema_pkg_apis_pipeline_v1_StepState builds the OpenAPI schema for the
// v1 StepState type. The ref callback resolves references to the core/v1
// container-state types and several Tekton v1 types (see Dependencies).
func schema_pkg_apis_pipeline_v1_StepState(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// plainString is the shared shape of the undocumented string fields.
	plainString := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Type:   []string{"string"},
			Format: "",
		},
	}
	// containerState builds the schema for one of the state detail fields.
	containerState := func(desc, target string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Ref:         ref(target),
			},
		}
	}
	// refArray builds an array schema whose items default to an empty object
	// and reference the given definition.
	refArray := func(target string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"array"},
				Items: &spec.SchemaOrArray{
					Schema: &spec.Schema{
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(target),
						},
					},
				},
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepState reports the results of running a step in a Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"waiting":    containerState("Details about a waiting container", "k8s.io/api/core/v1.ContainerStateWaiting"),
					"running":    containerState("Details about a running container", "k8s.io/api/core/v1.ContainerStateRunning"),
					"terminated": containerState("Details about a terminated container", "k8s.io/api/core/v1.ContainerStateTerminated"),
					"name":       plainString,
					"container":  plainString,
					"imageID":    plainString,
					"results":    refArray("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult"),
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"),
						},
					},
					"terminationReason": plainString,
					"inputs":            refArray("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact"),
					"outputs":           refArray("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact"),
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
	}
}
// schema_pkg_apis_pipeline_v1_StepTemplate returns the OpenAPI definition for
// v1.StepTemplate. The Dependencies list enumerates every definition reached
// through ref() in the properties below and must stay in sync with them.
// NOTE(review): this appears to be openapi-gen output — prefer regenerating
// over hand-editing; confirm against the file's generation header.
func schema_pkg_apis_pipeline_v1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepTemplate is a template for a Step",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"image": {
						SchemaProps: spec.SchemaProps{
							Description: "Image reference name. More info: https://kubernetes.io/docs/concepts/containers/images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// command/args mirror the corev1.Container fields, including the
					// x-kubernetes-list-type extension marking them atomic lists.
					"command": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"args": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"workingDir": {
						SchemaProps: spec.SchemaProps{
							Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"envFrom": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of sources to populate environment variables in the Step. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the Step is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvFromSource"),
									},
								},
							},
						},
					},
					// "env" carries strategic-merge-patch metadata keyed on "name",
					// matching the upstream corev1.Container field.
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables to set in the Step. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"computeResources": {
						SchemaProps: spec.SchemaProps{
							Description: "ComputeResources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
						},
					},
					"volumeMounts": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "mountPath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Volumes to mount into the Step's filesystem. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeMount"),
									},
								},
							},
						},
					},
					"volumeDevices": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "devicePath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "volumeDevices is the list of block devices to be used by the Step.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeDevice"),
									},
								},
							},
						},
					},
					"imagePullPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
							Ref:         ref("k8s.io/api/core/v1.SecurityContext"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
	}
}
// schema_pkg_apis_pipeline_v1_Task returns the OpenAPI definition for v1.Task:
// the standard kind/apiVersion/metadata trio plus the Task's "spec" property.
// Dependencies lists the two ref()'d definitions (TaskSpec, ObjectMeta).
func schema_pkg_apis_pipeline_v1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "Spec holds the desired state of the Task from the client",
							Default:     map[string]interface{}{},
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskBreakpoints returns the OpenAPI definition
// for v1.TaskBreakpoints. It references no other definitions via ref(), so no
// Dependencies list is emitted.
func schema_pkg_apis_pipeline_v1_TaskBreakpoints(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// "onFailure": a plain string flag controlling pause-on-failure behavior.
	onFailure := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "if enabled, pause TaskRun on failure of a step failed step will not exit",
			Type:        []string{"string"},
			Format:      "",
		},
	}
	// "beforeSteps": an atomic list of strings.
	beforeSteps := spec.Schema{
		VendorExtensible: spec.VendorExtensible{
			Extensions: spec.Extensions{
				"x-kubernetes-list-type": "atomic",
			},
		},
		SchemaProps: spec.SchemaProps{
			Type: []string{"array"},
			Items: &spec.SchemaOrArray{
				Schema: &spec.Schema{
					SchemaProps: spec.SchemaProps{
						Default: "",
						Type:    []string{"string"},
						Format:  "",
					},
				},
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskBreakpoints defines the breakpoint config for a particular Task",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"onFailure":   onFailure,
					"beforeSteps": beforeSteps,
				},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1_TaskList returns the OpenAPI definition for
// v1.TaskList: list metadata plus a required "items" array of Task refs.
func schema_pkg_apis_pipeline_v1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskList contains a list of Task",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// Lists use ListMeta rather than ObjectMeta.
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRef returns the OpenAPI definition for
// v1.TaskRef. All three properties are untyped strings, so the schema has no
// Dependencies and never calls ref().
func schema_pkg_apis_pipeline_v1_TaskRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// stringProp builds the common string-typed property shape used by every
	// field of this schema.
	stringProp := func(description string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: description,
				Type:        []string{"string"},
				Format:      "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRef can be used to refer to a specific instance of a task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name":       stringProp("Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"),
					"kind":       stringProp("TaskKind indicates the Kind of the Task: 1. Namespaced Task when Kind is set to \"Task\". If Kind is \"\", it defaults to \"Task\". 2. Custom Task when Kind is non-empty and APIVersion is non-empty"),
					"apiVersion": stringProp("API version of the referent Note: A Task with non-empty APIVersion and Kind is considered a Custom Task"),
				},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1_TaskResult returns the OpenAPI definition for
// v1.TaskResult. Only "name" is required; "properties" is a string-keyed map
// of PropertySpec and "value" references ParamValue.
func schema_pkg_apis_pipeline_v1_TaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskResult used to describe the results of a task",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name the given name",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"properties": {
						SchemaProps: spec.SchemaProps{
							Description: "Properties is the JSON Schema properties to support key-value pairs results.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"),
									},
								},
							},
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is a human-readable description of the result",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"value": {
						SchemaProps: spec.SchemaProps{
							Description: "Value the expression used to retrieve the value of the result from an underlying Step.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
						},
					},
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRun returns the OpenAPI definition for
// v1.TaskRun: kind/apiVersion/metadata plus "spec" and "status" sub-schemas.
func schema_pkg_apis_pipeline_v1_TaskRun(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunDebug returns the OpenAPI definition for
// v1.TaskRunDebug, whose single property points at the TaskBreakpoints schema.
func schema_pkg_apis_pipeline_v1_TaskRunDebug(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// The lone referenced definition; kept in a constant-like local so the
	// property Ref and the Dependencies entry cannot drift apart.
	const breakpointsRef = "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskBreakpoints"
	properties := map[string]spec.Schema{
		"breakpoints": {
			SchemaProps: spec.SchemaProps{
				Ref: ref(breakpointsRef),
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunDebug defines the breakpoint config for a particular TaskRun",
				Type:        []string{"object"},
				Properties:  properties,
			},
		},
		Dependencies: []string{breakpointsRef},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunInputs returns the OpenAPI definition for
// v1.TaskRunInputs: a single atomic "params" array of Param refs.
func schema_pkg_apis_pipeline_v1_TaskRunInputs(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunInputs holds the input values that this task was invoked with.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"params": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunList returns the OpenAPI definition for
// v1.TaskRunList: list metadata plus a required "items" array of TaskRun refs.
func schema_pkg_apis_pipeline_v1_TaskRunList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunList contains a list of TaskRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// Lists use ListMeta rather than ObjectMeta.
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRun"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunResult returns the OpenAPI definition for
// v1.TaskRunResult. Both "name" and "value" are required.
// NOTE(review): the top-level Description reads "TaskRunStepResult is a type
// alias of TaskRunResult" — it looks like the alias's Go doc comment was picked
// up for this type; if wrong, fix the source type's comment and regenerate
// rather than editing here.
func schema_pkg_apis_pipeline_v1_TaskRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStepResult is a type alias of TaskRunResult",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name the given name",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"value": {
						SchemaProps: spec.SchemaProps{
							Description: "Value the given value of the result",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
						},
					},
				},
				Required: []string{"name", "value"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunSidecarSpec returns the OpenAPI definition
// for v1.TaskRunSidecarSpec. Both properties are required; only
// ResourceRequirements is referenced via ref().
func schema_pkg_apis_pipeline_v1_TaskRunSidecarSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	properties := map[string]spec.Schema{
		"name": {
			SchemaProps: spec.SchemaProps{
				Description: "The name of the Sidecar to override.",
				Default:     "",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"computeResources": {
			SchemaProps: spec.SchemaProps{
				Description: "The resource requirements to apply to the Sidecar.",
				Default:     map[string]interface{}{},
				Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
			},
		},
	}
	schemaProps := spec.SchemaProps{
		Description: "TaskRunSidecarSpec is used to override the values of a Sidecar in the corresponding Task.",
		Type:        []string{"object"},
		Properties:  properties,
		Required:    []string{"name", "computeResources"},
	}
	return common.OpenAPIDefinition{
		Schema:       spec.Schema{SchemaProps: schemaProps},
		Dependencies: []string{"k8s.io/api/core/v1.ResourceRequirements"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunSpec returns the OpenAPI definition for
// v1.TaskRunSpec. The Dependencies list enumerates every definition reached
// through ref() in the properties below and must stay in sync with them.
// NOTE(review): this appears to be openapi-gen output — prefer regenerating
// over hand-editing; confirm against the file's generation header.
func schema_pkg_apis_pipeline_v1_TaskRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunSpec defines the desired state of TaskRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"debug": {
						SchemaProps: spec.SchemaProps{
							Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunDebug"),
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
									},
								},
							},
						},
					},
					"serviceAccountName": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					// taskRef and taskSpec are mutually exclusive per the
					// taskRef description below.
					"taskRef": {
						SchemaProps: spec.SchemaProps{
							Description: "no more than one of the TaskRef and TaskSpec may be specified.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef"),
						},
					},
					"taskSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifying TaskSpec can be disabled by setting `disable-inline-spec` feature flag. See Task.spec (API version: tekton.dev/v1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Used for cancelling a TaskRun (and maybe more later on)",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"statusMessage": {
						SchemaProps: spec.SchemaProps{
							Description: "Status message for cancellation.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"retries": {
						SchemaProps: spec.SchemaProps{
							Description: "Retries represents how many times this TaskRun should be retried in the event of task failure.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"timeout": {
						SchemaProps: spec.SchemaProps{
							Description: "Time after which one retry attempt times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					"podTemplate": {
						SchemaProps: spec.SchemaProps{
							Description: "PodTemplate holds pod specific configuration",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"),
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Workspaces is a list of WorkspaceBindings from volumes to workspaces.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding"),
									},
								},
							},
						},
					},
					"stepSpecs": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Specs to apply to Steps in this TaskRun. If a field is specified in both a Step and a StepSpec, the value from the StepSpec will be used. This field is only supported when the alpha feature gate is enabled.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepSpec"),
									},
								},
							},
						},
					},
					"sidecarSpecs": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Specs to apply to Sidecars in this TaskRun. If a field is specified in both a Sidecar and a SidecarSpec, the value from the SidecarSpec will be used. This field is only supported when the alpha feature gate is enabled.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarSpec"),
									},
								},
							},
						},
					},
					"computeResources": {
						SchemaProps: spec.SchemaProps{
							Description: "Compute resources to use for this TaskRun",
							Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
						},
					},
					"managedBy": {
						SchemaProps: spec.SchemaProps{
							Description: "ManagedBy indicates which controller is responsible for reconciling this resource. If unset or set to \"tekton.dev/pipeline\", the default Tekton controller will manage this resource. This field is immutable.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunDebug", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunStatus returns the OpenAPI definition for
// v1.TaskRunStatus. Only "podName" is required. Note the self-reference: the
// "retriesStatus" items ref() this same TaskRunStatus definition, which is why
// it also appears in its own Dependencies list.
// NOTE(review): this appears to be openapi-gen output — prefer regenerating
// over hand-editing; confirm against the file's generation header.
func schema_pkg_apis_pipeline_v1_TaskRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStatus defines the observed state of TaskRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"observedGeneration": {
						SchemaProps: spec.SchemaProps{
							Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"conditions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "type",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Conditions the latest available observations of a resource's current state.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("knative.dev/pkg/apis.Condition"),
									},
								},
							},
						},
					},
					"annotations": {
						SchemaProps: spec.SchemaProps{
							Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"podName": {
						SchemaProps: spec.SchemaProps{
							Description: "PodName is the name of the pod responsible for executing this task's steps.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"startTime": {
						SchemaProps: spec.SchemaProps{
							Description: "StartTime is the time the build is actually started.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"completionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "CompletionTime is the time the build completed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"steps": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Steps describes the state of each build step container.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState"),
									},
								},
							},
						},
					},
					// Recursive: items reference this same definition.
					"retriesStatus": {
						SchemaProps: spec.SchemaProps{
							Description: "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"),
									},
								},
							},
						},
					},
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results are the list of results written out by the task's containers",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult"),
									},
								},
							},
						},
					},
					"artifacts": {
						SchemaProps: spec.SchemaProps{
							Description: "Artifacts are the list of artifacts written out by the task's containers",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts"),
						},
					},
					"sidecars": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "The list has one entry per sidecar in the manifest. Each entry is represents the imageid of the corresponding sidecar.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState"),
									},
								},
							},
						},
					},
					"taskSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"),
						},
					},
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"),
						},
					},
					"spanContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SpanContext contains tracing span context fields",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"podName"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunStatusFields returns the OpenAPI schema for
// v1.TaskRunStatusFields; ref resolves type names to schema references.
// NOTE(review): this appears to be openapi-gen output — regenerate rather than
// hand-edit.
func schema_pkg_apis_pipeline_v1_TaskRunStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStatusFields holds the fields of TaskRun's status. This is defined separately and inlined so that other types can readily consume these fields via duck typing.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"podName": {
						SchemaProps: spec.SchemaProps{
							Description: "PodName is the name of the pod responsible for executing this task's steps.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"startTime": {
						SchemaProps: spec.SchemaProps{
							Description: "StartTime is the time the build is actually started.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"completionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "CompletionTime is the time the build completed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"steps": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Steps describes the state of each build step container.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState"),
									},
								},
							},
						},
					},
					"retriesStatus": {
						SchemaProps: spec.SchemaProps{
							Description: "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"),
									},
								},
							},
						},
					},
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results are the list of results written out by the task's containers",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult"),
									},
								},
							},
						},
					},
					"artifacts": {
						SchemaProps: spec.SchemaProps{
							Description: "Artifacts are the list of artifacts written out by the task's containers",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts"),
						},
					},
					"sidecars": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "The list has one entry per sidecar in the manifest. Each entry is represents the imageid of the corresponding sidecar.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState"),
									},
								},
							},
						},
					},
					"taskSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"),
						},
					},
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"),
						},
					},
					"spanContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SpanContext contains tracing span context fields",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"podName"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskRunStepSpec returns the OpenAPI schema for
// v1.TaskRunStepSpec (per-Step overrides on a TaskRun).
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_TaskRunStepSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStepSpec is used to override the values of a Step in the corresponding Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "The name of the Step to override.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"computeResources": {
						SchemaProps: spec.SchemaProps{
							Description: "The resource requirements to apply to the Step.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
						},
					},
				},
				Required: []string{"name", "computeResources"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.ResourceRequirements"},
	}
}
// schema_pkg_apis_pipeline_v1_TaskSpec returns the OpenAPI schema for
// v1.TaskSpec, the desired state of a Task.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_TaskSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskSpec defines the desired state of Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"),
									},
								},
							},
						},
					},
					"displayName": {
						SchemaProps: spec.SchemaProps{
							Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is a user-facing description of the task that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"steps": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step"),
									},
								},
							},
						},
					},
					"volumes": {
						SchemaProps: spec.SchemaProps{
							Description: "Volumes is a collection of volumes that are available to mount into the steps of the build. See Pod.spec.volumes (API version: v1)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Volume"),
									},
								},
							},
						},
					},
					"stepTemplate": {
						SchemaProps: spec.SchemaProps{
							Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate"),
						},
					},
					"sidecars": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar"),
									},
								},
							},
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Workspaces are the volumes that this Task requires.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration"),
									},
								},
							},
						},
					},
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results are values that this Task can output",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume"},
	}
}
// schema_pkg_apis_pipeline_v1_TimeoutFields returns the OpenAPI schema for
// v1.TimeoutFields (pipeline/tasks/finally timeouts).
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_TimeoutFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TimeoutFields allows granular specification of pipeline, task, and finally timeouts",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"pipeline": {
						SchemaProps: spec.SchemaProps{
							Description: "Pipeline sets the maximum allowed duration for execution of the entire pipeline. The sum of individual timeouts for tasks and finally must not exceed this value.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					"tasks": {
						SchemaProps: spec.SchemaProps{
							Description: "Tasks sets the maximum allowed duration of this pipeline's tasks",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					"finally": {
						SchemaProps: spec.SchemaProps{
							Description: "Finally sets the maximum allowed duration of this pipeline's finally",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1_WhenExpression returns the OpenAPI schema for
// v1.WhenExpression (guards deciding whether a PipelineTask runs).
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_WhenExpression(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WhenExpression allows a PipelineTask to declare expressions to be evaluated before the Task is run to determine whether the Task should be executed or skipped",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"input": {
						SchemaProps: spec.SchemaProps{
							Description: "Input is the string for guard checking which can be a static input or an output from a parent Task",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"operator": {
						SchemaProps: spec.SchemaProps{
							Description: "Operator that represents an Input's relationship to the values",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"values": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Values is an array of strings, which is compared against the input, for guard checking It must be non-empty",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"cel": {
						SchemaProps: spec.SchemaProps{
							Description: "CEL is a string of Common Language Expression, which can be used to conditionally execute the task based on the result of the expression evaluation More info about CEL syntax: https://github.com/google/cel-spec/blob/master/doc/langdef.md",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1_WorkspaceBinding returns the OpenAPI schema for
// v1.WorkspaceBinding (maps a declared workspace to a concrete volume source).
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspaceBinding maps a Task's declared workspace to a Volume.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of the workspace populated by the volume.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"subPath": {
						SchemaProps: spec.SchemaProps{
							Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"volumeClaimTemplate": {
						SchemaProps: spec.SchemaProps{
							Description: "VolumeClaimTemplate is a template for a claim that will be created in the same namespace. The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun. See PersistentVolumeClaim (API version: v1)",
							Ref:         ref("k8s.io/api/core/v1.PersistentVolumeClaim"),
						},
					},
					"persistentVolumeClaim": {
						SchemaProps: spec.SchemaProps{
							Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.",
							Ref:         ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
						},
					},
					"emptyDir": {
						SchemaProps: spec.SchemaProps{
							Description: "EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.",
							Ref:         ref("k8s.io/api/core/v1.EmptyDirVolumeSource"),
						},
					},
					"configMap": {
						SchemaProps: spec.SchemaProps{
							Description: "ConfigMap represents a configMap that should populate this workspace.",
							Ref:         ref("k8s.io/api/core/v1.ConfigMapVolumeSource"),
						},
					},
					"secret": {
						SchemaProps: spec.SchemaProps{
							Description: "Secret represents a secret that should populate this workspace.",
							Ref:         ref("k8s.io/api/core/v1.SecretVolumeSource"),
						},
					},
					"projected": {
						SchemaProps: spec.SchemaProps{
							Description: "Projected represents a projected volume that should populate this workspace.",
							Ref:         ref("k8s.io/api/core/v1.ProjectedVolumeSource"),
						},
					},
					"csi": {
						SchemaProps: spec.SchemaProps{
							Description: "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.",
							Ref:         ref("k8s.io/api/core/v1.CSIVolumeSource"),
						},
					},
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"},
	}
}
// schema_pkg_apis_pipeline_v1_WorkspaceDeclaration returns the OpenAPI schema
// for v1.WorkspaceDeclaration (a volume a Task requires).
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspaceDeclaration is a declaration of a volume that a Task requires.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name by which you can bind the volume at runtime.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is an optional human readable description of this volume.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"mountPath": {
						SchemaProps: spec.SchemaProps{
							Description: "MountPath overrides the directory that the volume will be made available at.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"readOnly": {
						SchemaProps: spec.SchemaProps{
							Description: "ReadOnly dictates whether a mounted volume is writable. By default this field is false and so mounted volumes are writable.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"optional": {
						SchemaProps: spec.SchemaProps{
							Description: "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
				},
				Required: []string{"name"},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding returns the OpenAPI
// schema for v1.WorkspacePipelineTaskBinding (pipeline workspace -> task
// workspace mapping).
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of the workspace as declared by the task",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"workspace": {
						SchemaProps: spec.SchemaProps{
							Description: "Workspace is the name of the workspace declared by the pipeline",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"subPath": {
						SchemaProps: spec.SchemaProps{
							Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name"},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1_WorkspaceUsage returns the OpenAPI schema for
// v1.WorkspaceUsage (a Step/Sidecar's isolated access to a Task workspace).
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of the workspace this Step or Sidecar wants access to.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"mountPath": {
						SchemaProps: spec.SchemaProps{
							Description: "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "mountPath"},
			},
		},
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/substitution"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
)
// ParamsPrefix is the prefix used in $(...) expressions referring to parameters,
// e.g. "$(params.foo)".
const ParamsPrefix = "params"
// ParamSpec defines arbitrary parameters needed beyond typed inputs (such as
// resources). Parameter values are provided by users as inputs on a TaskRun
// or PipelineRun.
type ParamSpec struct {
	// Name declares the name by which a parameter is referenced.
	Name string `json:"name"`
	// Type is the user-specified type of the parameter. The possible types
	// are currently "string", "array" and "object", and "string" is the default.
	// If omitted, the type is inferred by SetDefaults below.
	// +optional
	Type ParamType `json:"type,omitempty"`
	// Description is a user-facing description of the parameter that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`
	// Properties is the JSON Schema properties to support key-value pairs parameter.
	// Only meaningful for object-typed parameters.
	// +optional
	Properties map[string]PropertySpec `json:"properties,omitempty"`
	// Default is the value a parameter takes if no input value is supplied. If
	// default is set, a Task may be executed without a supplied value for the
	// parameter.
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Default *ParamValue `json:"default,omitempty"`
	// Enum declares a set of allowed param input values for tasks/pipelines that can be validated.
	// If Enum is not set, no input validation is performed for the param.
	// +optional
	Enum []string `json:"enum,omitempty"`
}
// ParamSpecs is a list of ParamSpec declarations.
// +listType=atomic
type ParamSpecs []ParamSpec
// PropertySpec defines the struct for object keys; Type defaults to "string"
// (see ParamSpec.setDefaultsForProperties).
type PropertySpec struct {
	Type ParamType `json:"type,omitempty"`
}
// SetDefaults infers and sets the parameter's type when the author omitted it,
// and defaults the types of any declared object properties.
//
// Inference precedence: an explicit Type wins; otherwise a `properties`
// section implies object; otherwise the shape of the default value is used;
// with no hints at all the type is string.
func (pp *ParamSpec) SetDefaults(context.Context) {
	if pp == nil {
		return
	}
	if pp.Type != "" {
		// Type was provided by the author; only property types may still need
		// defaulting in case a `properties` section is present.
		pp.setDefaultsForProperties()
		return
	}
	if pp.Properties != nil {
		pp.Type = ParamTypeObject
		pp.setDefaultsForProperties()
		return
	}
	if pp.Default == nil {
		// Nothing to infer from: string is the default type.
		pp.Type = ParamTypeString
		return
	}
	// Infer the type from the default value.
	switch {
	case pp.Default.Type != "":
		pp.Type = pp.Default.Type
	case pp.Default.ArrayVal != nil:
		pp.Type = ParamTypeArray
	case pp.Default.ObjectVal != nil:
		pp.Type = ParamTypeObject
	default:
		pp.Type = ParamTypeString
	}
}
// setDefaultsForProperties fills in ParamTypeString for every object property
// whose type was left unspecified.
func (pp *ParamSpec) setDefaultsForProperties() {
	for key := range pp.Properties {
		if pp.Properties[key].Type == "" {
			pp.Properties[key] = PropertySpec{Type: ParamTypeString}
		}
	}
}
// GetNames returns the names of all the declared parameters, in declaration
// order.
func (ps ParamSpecs) GetNames() []string {
	// Pre-size the result: the final length is known, so avoid repeated
	// growth/copy during append.
	names := make([]string, 0, len(ps))
	for _, p := range ps {
		names = append(names, p.Name)
	}
	return names
}
// SortByType partitions the params into string params, array params, and
// object params (returned in that order), preserving relative order within
// each group. Params with an unrecognized type are grouped with the strings.
func (ps ParamSpecs) SortByType() (ParamSpecs, ParamSpecs, ParamSpecs) {
	var strs, arrays, objects ParamSpecs
	for _, param := range ps {
		switch param.Type {
		case ParamTypeArray:
			arrays = append(arrays, param)
		case ParamTypeObject:
			objects = append(objects, param)
		default:
			// ParamTypeString and any unknown type.
			strs = append(strs, param)
		}
	}
	return strs, arrays, objects
}
// ValidateNoDuplicateNames returns an error naming every parameter that is
// declared more than once, or nil if all names are unique.
func (ps ParamSpecs) ValidateNoDuplicateNames() *apis.FieldError {
	var errs *apis.FieldError
	for name := range findDups(ps.GetNames()) {
		errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", name))
	}
	return errs
}
// validateParamEnums validates Enum declarations on each param: the
// enable-param-enum feature flag must be on, enums are allowed only on
// string-typed params, enum values must be unique, and a string default (if
// any) must be a member of the enum.
func (ps ParamSpecs) validateParamEnums(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	for _, p := range ps {
		if len(p.Enum) == 0 {
			continue
		}
		if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableParamEnum {
			// BUG FIX: previously this call was errs.Also(errs, ...), which
			// appended the accumulator to itself and duplicated every error
			// collected so far.
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag `%s` should be set to true to use Enum", config.EnableParamEnum), "").ViaKey(p.Name))
		}
		if p.Type != ParamTypeString {
			errs = errs.Also(apis.ErrGeneric("enum can only be set with string type param", "").ViaKey(p.Name))
		}
		for dup := range findDups(p.Enum) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("parameter enum value %v appears more than once", dup), "").ViaKey(p.Name))
		}
		if p.Default != nil && p.Default.StringVal != "" {
			if !slices.Contains(p.Enum, p.Default.StringVal) {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("param default value %v not in the enum list", p.Default.StringVal), "").ViaKey(p.Name))
			}
		}
	}
	return errs
}
// findDups returns the set of values that occur more than once in vals.
func findDups(vals []string) sets.String {
	seen := sets.NewString()
	dups := sets.NewString()
	for _, v := range vals {
		if seen.Has(v) {
			dups.Insert(v)
		}
		seen.Insert(v)
	}
	return dups
}
// Param declares a ParamValue to use for the parameter called Name.
type Param struct {
	Name string `json:"name"`
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ParamValue `json:"value"`
}
// GetVarSubstitutionExpressions extracts every value between "$(" and ")" in
// the Param's value (string, array elements, or object values, depending on
// the value's type). The bool reports whether at least one expression was
// found; an unrecognized value type yields (nil, false).
func (p Param) GetVarSubstitutionExpressions() ([]string, bool) {
	var exprs []string
	switch p.Value.Type {
	case ParamTypeArray:
		for _, elem := range p.Value.ArrayVal {
			exprs = append(exprs, validateString(elem)...)
		}
	case ParamTypeString:
		exprs = append(exprs, validateString(p.Value.StringVal)...)
	case ParamTypeObject:
		for _, elem := range p.Value.ObjectVal {
			exprs = append(exprs, validateString(elem)...)
		}
	default:
		return nil, false
	}
	return exprs, len(exprs) != 0
}
// ExtractNames returns the set of unique names used by these params.
func (ps Params) ExtractNames() sets.String {
	names := sets.NewString()
	for i := range ps {
		names.Insert(ps[i].Name)
	}
	return names
}
// extractValues flattens every param value (the string value, all array
// elements, and all object values) into a single slice of strings.
func (ps Params) extractValues() []string {
	vals := []string{}
	for _, p := range ps {
		vals = append(vals, p.Value.StringVal)
		vals = append(vals, p.Value.ArrayVal...)
		for _, objVal := range p.Value.ObjectVal {
			vals = append(vals, objVal)
		}
	}
	return vals
}
// extractParamMapArrVals creates a map keyed by param.Name whose values are
// the corresponding param.Value.ArrayVal slices.
func (ps Params) extractParamMapArrVals() map[string][]string {
	byName := make(map[string][]string, len(ps))
	for i := range ps {
		byName[ps[i].Name] = ps[i].Value.ArrayVal
	}
	return byName
}
// ParseTaskandResultName parses "task name", "result name" from a Matrix Context Variable
// Valid Example 1:
// - Input: tasks.myTask.matrix.length
// - Output: "myTask", ""
// Valid Example 2:
// - Input: tasks.myTask.matrix.ResultName.length
// - Output: "myTask", "ResultName"
// Expressions that do not match either shape are skipped; ("", "") is
// returned when no expression matches.
func (p Param) ParseTaskandResultName() (string, string) {
	if expressions, ok := p.GetVarSubstitutionExpressions(); ok {
		for _, expression := range expressions {
			subExpressions := strings.Split(expression, ".")
			// Guard against malformed expressions with fewer than two
			// dot-separated segments, which previously panicked on the
			// subExpressions[1] access below.
			if len(subExpressions) < 2 {
				continue
			}
			pipelineTaskName := subExpressions[1]
			if len(subExpressions) == 4 {
				return pipelineTaskName, ""
			} else if len(subExpressions) == 5 {
				resultName := subExpressions[3]
				return pipelineTaskName, resultName
			}
		}
	}
	return "", ""
}
// Params is a list of Param values supplied on a TaskRun or PipelineRun.
// +listType=atomic
type Params []Param
// ExtractParamArrayLengths returns the length of every array-typed param,
// keyed by param name.
// Example of returned value: {"a-array-params": 2,"b-array-params": 2 }
func (ps Params) ExtractParamArrayLengths() map[string]int {
	lengths := make(map[string]int)
	for i := range ps {
		if ps[i].Value.Type == ParamTypeArray {
			lengths[ps[i].Name] = len(ps[i].Value.ArrayVal)
		}
	}
	return lengths
}
// validateDuplicateParameters reports an error for every parameter whose name
// was already seen earlier in the list, identifying the offending index.
func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) {
	seen := sets.NewString()
	for idx, param := range ps {
		if seen.Has(param.Name) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("parameter names must be unique,"+
				" the parameter \"%s\" is also defined at", param.Name), fmt.Sprintf("[%d].name", idx)))
		}
		seen.Insert(param.Name)
	}
	return errs
}
// ReplaceVariables returns a deep copy of the params with string, array and
// object variable replacements applied to each value; the receiver is left
// unmodified.
func (ps Params) ReplaceVariables(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) Params {
	out := ps.DeepCopy()
	for idx := range out {
		out[idx].Value.ApplyReplacements(stringReplacements, arrayReplacements, objectReplacements)
	}
	return out
}
// ExtractDefaultParamArrayLengths returns, for every param whose default is an
// array, the length of that default, keyed by param name.
// Example of returned value: {"a-array-params": 2,"b-array-params": 2 }
func (ps ParamSpecs) ExtractDefaultParamArrayLengths() map[string]int {
	lengths := make(map[string]int)
	for _, p := range ps {
		if p.Default != nil && p.Default.Type == ParamTypeArray {
			lengths[p.Name] = len(p.Default.ArrayVal)
		}
	}
	return lengths
}
// extractArrayIndexingParamRefs takes a string of the form
// `foo-$(params.array-param[1])-bar` and returns only the substrings that
// reference an element of an array param, e.g. for
// `foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`
// it returns ["$(params.array-param[1])", "$(params.other-array-param[2])"].
func extractArrayIndexingParamRefs(paramReference string) []string {
	refs := []string{}
	for _, expr := range substitution.ExtractArrayIndexingParamsExpressions(paramReference) {
		// Keep only expressions that actually carry an index suffix.
		if substitution.ExtractIndexString(expr) != "" {
			refs = append(refs, expr)
		}
	}
	return refs
}
// extractParamRefsFromSteps collects candidate param-reference strings from
// each step: its script plus every string field of its container form.
func extractParamRefsFromSteps(steps []Step) []string {
	refs := []string{}
	for i := range steps {
		refs = append(refs, steps[i].Script)
		refs = append(refs, extractParamRefsFromContainer(steps[i].ToK8sContainer())...)
	}
	return refs
}
// extractParamRefsFromStepTemplate collects candidate param-reference strings
// from the step template's container form; nil-safe.
func extractParamRefsFromStepTemplate(stepTemplate *StepTemplate) []string {
	if stepTemplate == nil {
		return nil
	}
	return extractParamRefsFromContainer(stepTemplate.ToK8sContainer())
}
// extractParamRefsFromSidecars collects candidate param-reference strings from
// each sidecar: its script plus every string field of its container form.
func extractParamRefsFromSidecars(sidecars []Sidecar) []string {
	refs := []string{}
	for i := range sidecars {
		refs = append(refs, sidecars[i].Script)
		refs = append(refs, extractParamRefsFromContainer(sidecars[i].ToK8sContainer())...)
	}
	return refs
}
// extractParamRefsFromVolumes collects every string field of each volume that
// could contain a param reference (names, keys, paths, claim names, projected
// source names, CSI attributes).
func extractParamRefsFromVolumes(volumes []corev1.Volume) []string {
	paramsRefs := []string{}
	// Iterate by index: corev1.Volume is a large struct, and ranging by value
	// copies every element (the original also mixed the value copy `v` with
	// `volumes[i]`, which this makes consistent).
	for i := range volumes {
		v := &volumes[i]
		paramsRefs = append(paramsRefs, v.Name)
		if v.ConfigMap != nil {
			paramsRefs = append(paramsRefs, v.ConfigMap.Name)
			for _, item := range v.ConfigMap.Items {
				paramsRefs = append(paramsRefs, item.Key)
				paramsRefs = append(paramsRefs, item.Path)
			}
		}
		if v.Secret != nil {
			paramsRefs = append(paramsRefs, v.Secret.SecretName)
			for _, item := range v.Secret.Items {
				paramsRefs = append(paramsRefs, item.Key)
				paramsRefs = append(paramsRefs, item.Path)
			}
		}
		if v.PersistentVolumeClaim != nil {
			paramsRefs = append(paramsRefs, v.PersistentVolumeClaim.ClaimName)
		}
		if v.Projected != nil {
			for _, s := range v.Projected.Sources {
				if s.ConfigMap != nil {
					paramsRefs = append(paramsRefs, s.ConfigMap.Name)
				}
				if s.Secret != nil {
					paramsRefs = append(paramsRefs, s.Secret.Name)
				}
				if s.ServiceAccountToken != nil {
					paramsRefs = append(paramsRefs, s.ServiceAccountToken.Audience)
				}
			}
		}
		if v.CSI != nil {
			if v.CSI.NodePublishSecretRef != nil {
				paramsRefs = append(paramsRefs, v.CSI.NodePublishSecretRef.Name)
			}
			// Ranging over a nil map is a no-op, so no nil check is needed.
			for _, value := range v.CSI.VolumeAttributes {
				paramsRefs = append(paramsRefs, value)
			}
		}
	}
	return paramsRefs
}
// extractParamRefsFromContainer get all array indexing references from container.
// Every string-valued container field that may carry a parameter reference is
// gathered: name, image, args, env, envFrom, workingDir, command, volume mounts.
func extractParamRefsFromContainer(c *corev1.Container) []string {
	refs := []string{c.Name, c.Image, string(c.ImagePullPolicy)}
	refs = append(refs, c.Args...)
	for _, env := range c.Env {
		refs = append(refs, env.Value)
		if env.ValueFrom != nil {
			if sk := env.ValueFrom.SecretKeyRef; sk != nil {
				refs = append(refs, sk.LocalObjectReference.Name, sk.Key)
			}
			if ck := env.ValueFrom.ConfigMapKeyRef; ck != nil {
				refs = append(refs, ck.LocalObjectReference.Name, ck.Key)
			}
		}
	}
	for _, envFrom := range c.EnvFrom {
		refs = append(refs, envFrom.Prefix)
		if envFrom.ConfigMapRef != nil {
			refs = append(refs, envFrom.ConfigMapRef.LocalObjectReference.Name)
		}
		if envFrom.SecretRef != nil {
			refs = append(refs, envFrom.SecretRef.LocalObjectReference.Name)
		}
	}
	refs = append(refs, c.WorkingDir)
	refs = append(refs, c.Command...)
	for _, vm := range c.VolumeMounts {
		refs = append(refs, vm.Name, vm.MountPath, vm.SubPath)
	}
	return refs
}
// ParamType indicates the type of an input parameter;
// Used to distinguish between a single string and an array of strings.
type ParamType string

// Valid ParamTypes:
const (
	// ParamTypeString indicates a single string value.
	ParamTypeString ParamType = "string"
	// ParamTypeArray indicates a list of string values.
	ParamTypeArray ParamType = "array"
	// ParamTypeObject indicates a map of string keys to string values.
	ParamTypeObject ParamType = "object"
)

// AllParamTypes can be used for ParamType validation.
var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject}
// ParamValues is modeled after IntOrString in kubernetes/apimachinery:
// ParamValue is a type that can hold a single string, string array, or string map.
// Used in JSON unmarshalling so that a single JSON field can accept
// either an individual string or an array of strings.
type ParamValue struct {
	Type ParamType // Represents the stored type of ParamValues.
	// StringVal holds the value when Type is ParamTypeString.
	StringVal string
	// ArrayVal holds the value when Type is ParamTypeArray.
	// +listType=atomic
	ArrayVal []string
	// ObjectVal holds the value when Type is ParamTypeObject.
	ObjectVal map[string]string
}
// UnmarshalJSON implements the json.Unmarshaller interface.
func (paramValues *ParamValue) UnmarshalJSON(value []byte) error {
// ParamValues is used for Results Value as well, the results can be any kind of
// data so we need to check if it is empty.
if len(value) == 0 {
paramValues.Type = ParamTypeString
return nil
}
if value[0] == '[' {
// We're trying to Unmarshal to []string, but for cases like []int or other types
// of nested array which we don't support yet, we should continue and Unmarshal
// it to String. If the Type being set doesn't match what it actually should be,
// it will be captured by validation in reconciler.
// if failed to unmarshal to array, we will convert the value to string and marshal it to string
var a []string
if err := json.Unmarshal(value, &a); err == nil {
paramValues.Type = ParamTypeArray
paramValues.ArrayVal = a
return nil
}
}
if value[0] == '{' {
// if failed to unmarshal to map, we will convert the value to string and marshal it to string
var m map[string]string
if err := json.Unmarshal(value, &m); err == nil {
paramValues.Type = ParamTypeObject
paramValues.ObjectVal = m
return nil
}
}
// By default we unmarshal to string
paramValues.Type = ParamTypeString
if err := json.Unmarshal(value, ¶mValues.StringVal); err == nil {
return nil
}
paramValues.StringVal = string(value)
return nil
}
// MarshalJSON implements the json.Marshaller interface.
// The encoded form depends solely on the stored Type; an unknown Type is an error.
func (paramValues ParamValue) MarshalJSON() ([]byte, error) {
	switch paramValues.Type {
	case ParamTypeString:
		return json.Marshal(paramValues.StringVal)
	case ParamTypeArray:
		return json.Marshal(paramValues.ArrayVal)
	case ParamTypeObject:
		return json.Marshal(paramValues.ObjectVal)
	}
	return []byte{}, fmt.Errorf("impossible ParamValues.Type: %q", paramValues.Type)
}
// ApplyReplacements applies replacements for ParamValues type, in place.
// Arrays substitute each element (which may expand to several values),
// objects substitute each value, and strings go through applyOrCorrect,
// which may also correct the stored Type.
func (paramValues *ParamValue) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) {
	switch paramValues.Type {
	case ParamTypeArray:
		expanded := []string{}
		for _, elem := range paramValues.ArrayVal {
			expanded = append(expanded, substitution.ApplyArrayReplacements(elem, stringReplacements, arrayReplacements)...)
		}
		paramValues.ArrayVal = expanded
	case ParamTypeObject:
		substituted := map[string]string{}
		for key, val := range paramValues.ObjectVal {
			substituted[key] = substitution.ApplyReplacements(val, stringReplacements)
		}
		paramValues.ObjectVal = substituted
	default:
		// ParamTypeString and any unrecognized type: the value may be a plain
		// string or a whole-array/object reference needing type correction.
		paramValues.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements)
	}
}
// applyOrCorrect deals with string param whose value can be string literal or a reference to a string/array/object param/result.
// If the value of paramValues is a reference to array or object, the type will be corrected from string to array/object.
// NOTE: the three lookups below run in sequence; if the same name appears in
// more than one replacement map, the later branch wins.
func (paramValues *ParamValue) applyOrCorrect(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) {
	stringVal := paramValues.StringVal
	// if the stringVal is a string literal or a string that mixed with var references
	// just do the normal string replacement
	if !exactVariableSubstitutionRegex.MatchString(stringVal) {
		paramValues.StringVal = substitution.ApplyReplacements(paramValues.StringVal, stringReplacements)
		return
	}
	// trim the head "$(" and the tail ")" or "[*])"
	// i.e. get "params.name" from "$(params.name)" or "$(params.name[*])"
	trimedStringVal := substitution.StripStarVarSubExpression(stringVal)
	// if the stringVal is a reference to a string param
	if _, ok := stringReplacements[trimedStringVal]; ok {
		paramValues.StringVal = substitution.ApplyReplacements(paramValues.StringVal, stringReplacements)
	}
	// if the stringVal is a reference to an array param, we need to change the type other than apply replacement
	if _, ok := arrayReplacements[trimedStringVal]; ok {
		paramValues.StringVal = ""
		paramValues.ArrayVal = substitution.ApplyArrayReplacements(stringVal, stringReplacements, arrayReplacements)
		paramValues.Type = ParamTypeArray
	}
	// if the stringVal is a reference an object param, we need to change the type other than apply replacement
	if _, ok := objectReplacements[trimedStringVal]; ok {
		paramValues.StringVal = ""
		paramValues.ObjectVal = objectReplacements[trimedStringVal]
		paramValues.Type = ParamTypeObject
	}
}
// NewStructuredValues creates an ParamValues of type ParamTypeString or ParamTypeArray, based on
// how many inputs are given (>1 input will create an array, not string).
func NewStructuredValues(value string, values ...string) *ParamValue {
	if len(values) == 0 {
		return &ParamValue{
			Type:      ParamTypeString,
			StringVal: value,
		}
	}
	return &ParamValue{
		Type:     ParamTypeArray,
		ArrayVal: append([]string{value}, values...),
	}
}
// NewObject creates an ParamValues of type ParamTypeObject using the provided key-value pairs.
func NewObject(pairs map[string]string) *ParamValue {
	return &ParamValue{
		ObjectVal: pairs,
		Type:      ParamTypeObject,
	}
}
// ArrayReference returns the name of the parameter from array parameter reference
// returns arrayParam from $(params.arrayParam[*])
func ArrayReference(a string) string {
	withoutPrefix := strings.TrimPrefix(a, "$("+ParamsPrefix+".")
	return strings.TrimSuffix(withoutPrefix, "[*])")
}
// validatePipelineParametersVariablesInTaskParameters validates param value that
// may contain the reference(s) to other params to make sure those references are used appropriately.
func validatePipelineParametersVariablesInTaskParameters(params Params, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	errs = errs.Also(params.validateDuplicateParameters()).ViaField("params")
	for _, p := range params {
		switch p.Value.Type {
		case ParamTypeArray:
			// Each array element is validated individually.
			for i, elem := range p.Value.ArrayVal {
				errs = errs.Also(validateArrayVariable(elem, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", i).ViaFieldKey("params", p.Name))
			}
		case ParamTypeObject:
			// Each object value is validated as a string field.
			for key, val := range p.Value.ObjectVal {
				errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldKey("properties", key).ViaFieldKey("params", p.Name))
			}
		default:
			// ParamTypeString and any unrecognized type are treated as strings.
			errs = errs.Also(validateParamStringValue(p, prefix, paramNames, arrayParamNames, objectParamNameKeys))
		}
	}
	return errs
}
// validateParamStringValue validates the param value field of string type
// that may contain references to other isolated array/object params other than string param.
func validateParamStringValue(param Param, prefix string, paramNames sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	stringValue := param.Value.StringVal

	// An isolated reference to a whole array/object only needs the name checked.
	isIsolated, errs := substitution.ValidateWholeArrayOrObjectRefInStringVariable(param.Name, stringValue, prefix, paramNames)
	if isIsolated {
		return errs
	}

	// Otherwise the value is a string literal and/or mixes multiple variables:
	// valid example: "$(params.myString) and another $(params.myObject.key1)"
	// invalid example: "$(params.myString) and another $(params.myObject[*])"
	return validateStringVariable(stringValue, prefix, paramNames, arrayVars, objectParamNameKeys).ViaFieldKey("params", param.Name)
}
// validateStringVariable validates the normal string fields that can only accept references to string param or individual keys of object param.
func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError {
	errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars).
		Also(validateObjectVariable(value, prefix, objectParamNameKeys))
	return errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(value, prefix, arrayVars))
}
// validateArrayVariable validates an array element: it may reference string
// params, object keys, or an isolated array param reference.
func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError {
	errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars).
		Also(validateObjectVariable(value, prefix, objectParamNameKeys))
	return errs.Also(substitution.ValidateVariableReferenceIsIsolated(value, prefix, arrayVars))
}
// validateObjectVariable checks references to object params: individual keys
// must exist, and referencing an entire object is prohibited here.
func validateObjectVariable(value, prefix string, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	objectNames := sets.NewString()
	for name, keys := range objectParamNameKeys {
		objectNames.Insert(name)
		// Keys are referenced as $(prefix.objectName.key).
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, fmt.Sprintf("%s\\.%s", prefix, name), sets.NewString(keys...)))
	}
	return errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(value, prefix, objectNames))
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"knative.dev/pkg/apis"
)
var _ apis.Convertible = (*Pipeline)(nil)

// ConvertTo implements apis.Convertible
func (p *Pipeline) ConvertTo(ctx context.Context, sink apis.Convertible) error {
	// Skip conversion while the resource is being deleted.
	if apis.IsInDelete(ctx) {
		return nil
	}
	return fmt.Errorf("v1 is the highest known version, got: %T", sink)
}
// ConvertFrom implements apis.Convertible
func (p *Pipeline) ConvertFrom(ctx context.Context, source apis.Convertible) error {
	// Skip conversion while the resource is being deleted.
	if apis.IsInDelete(ctx) {
		return nil
	}
	return fmt.Errorf("v1 is the highest known version, got: %T", source)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"knative.dev/pkg/apis"
)
var _ apis.Defaultable = (*Pipeline)(nil)

// SetDefaults sets default values on the Pipeline's Spec by delegating to it.
func (p *Pipeline) SetDefaults(ctx context.Context) {
	p.Spec.SetDefaults(ctx)
}
// SetDefaults sets default values for the PipelineSpec's Params, Tasks, and Finally.
func (ps *PipelineSpec) SetDefaults(ctx context.Context) {
	for i := range ps.Params {
		ps.Params[i].SetDefaults(ctx)
	}
	// Iterate by index so defaults are applied to the elements stored in the
	// slices. The previous range-over-value loops mutated per-iteration
	// copies, so only fields reachable through pointers (e.g. TaskRef,
	// TaskSpec) were effectively defaulted; any future direct-field default
	// would have been silently dropped. This also matches the Params loop.
	for i := range ps.Tasks {
		ps.Tasks[i].SetDefaults(ctx)
	}
	for i := range ps.Finally {
		ps.Finally[i].SetDefaults(ctx)
	}
}
// SetDefaults sets default values for a PipelineTask: a default resolver for
// name-less refs, a default kind for resolver-less refs, and defaults for any
// inline TaskSpec.
func (pt *PipelineTask) SetDefaults(ctx context.Context) {
	cfg := config.FromContextOrDefaults(ctx)
	if pt.TaskRef != nil {
		// A ref with neither a name nor a resolver falls back to the
		// cluster-configured default resolver type.
		if pt.TaskRef.Name == "" && pt.TaskRef.Resolver == "" {
			pt.TaskRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType)
		}
		// Order matters: if a default resolver was just assigned above, this
		// condition is false and Kind is intentionally left empty — Kind is
		// only defaulted for name-based (non-resolver) references.
		if pt.TaskRef.Kind == "" && pt.TaskRef.Resolver == "" {
			pt.TaskRef.Kind = NamespacedTaskKind
		}
	}
	if pt.TaskSpec != nil {
		pt.TaskSpec.SetDefaults(ctx)
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/kmeta"
)
// PipelineTaskOnErrorType defines a list of supported failure handling behaviors of a PipelineTask on error
type PipelineTaskOnErrorType string

const (
	// PipelineTasksAggregateStatus is a param representing aggregate status of all dag pipelineTasks
	PipelineTasksAggregateStatus = "tasks.status"
	// PipelineTasks is a value representing a task is a member of "tasks" section of the pipeline
	PipelineTasks = "tasks"
	// PipelineFinallyTasks is a value representing a task is a member of "finally" section of the pipeline
	PipelineFinallyTasks = "finally"
	// PipelineTaskStopAndFail indicates to stop and fail the PipelineRun if the PipelineTask fails
	PipelineTaskStopAndFail PipelineTaskOnErrorType = "stopAndFail"
	// PipelineTaskContinue indicates to continue executing the rest of the DAG when the PipelineTask fails
	PipelineTaskContinue PipelineTaskOnErrorType = "continue"
)
// +genclient
// +genclient:noStatus
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Pipeline describes a list of Tasks to execute. It expresses how outputs
// of tasks feed into inputs of subsequent tasks.
// +k8s:openapi-gen=true
// +kubebuilder:storageversion
type Pipeline struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec holds the desired state of the Pipeline from the client
	// +optional
	Spec PipelineSpec `json:"spec"`
}
var _ kmeta.OwnerRefable = (*Pipeline)(nil)

// PipelineMetadata returns the Pipeline's ObjectMeta, implementing PipelineObject.
func (p *Pipeline) PipelineMetadata() metav1.ObjectMeta {
	return p.ObjectMeta
}
// PipelineSpec returns the Pipeline's Spec, implementing PipelineObject.
func (p *Pipeline) PipelineSpec() PipelineSpec {
	return p.Spec
}
// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*Pipeline) GetGroupVersionKind() schema.GroupVersionKind {
	kind := pipeline.PipelineControllerName
	return SchemeGroupVersion.WithKind(kind)
}
// Checksum computes the sha256 checksum of the pipeline object.
// Prior to computing the checksum, it performs some preprocessing on the
// metadata of the object where it removes system provided annotations.
// Only the name, namespace, generateName, user-provided labels and annotations
// and the pipelineSpec are included for the checksum computation.
func (p *Pipeline) Checksum() ([]byte, error) {
	// Build a sanitized copy so system annotations don't perturb the hash.
	sanitized := Pipeline{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "tekton.dev/v1",
			Kind:       "Pipeline",
		},
		ObjectMeta: checksum.PrepareObjectMeta(p),
		Spec:       p.Spec,
	}
	sum, err := checksum.ComputeSha256Checksum(sanitized)
	if err != nil {
		return nil, err
	}
	return sum, nil
}
// PipelineSpec defines the desired state of Pipeline.
type PipelineSpec struct {
	// DisplayName is a user-facing name of the pipeline that may be
	// used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`
	// Description is a user-facing description of the pipeline that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`
	// Tasks declares the graph of Tasks that execute when this Pipeline is run.
	// +listType=atomic
	Tasks []PipelineTask `json:"tasks,omitempty"`
	// Params declares a list of input parameters that must be supplied when
	// this Pipeline is run.
	// NOTE(review): unlike the other list fields, Params carries no
	// +optional/+listType markers here — confirm this is intentional.
	Params ParamSpecs `json:"params,omitempty"`
	// Workspaces declares a set of named workspaces that are expected to be
	// provided by a PipelineRun.
	// +optional
	// +listType=atomic
	Workspaces []PipelineWorkspaceDeclaration `json:"workspaces,omitempty"`
	// Results are values that this pipeline can output once run
	// +optional
	// +listType=atomic
	Results []PipelineResult `json:"results,omitempty"`
	// Finally declares the list of Tasks that execute just before leaving the Pipeline
	// i.e. either after all Tasks are finished executing successfully
	// or after a failure which would result in ending the Pipeline
	// +listType=atomic
	Finally []PipelineTask `json:"finally,omitempty"`
}
// PipelineResult used to describe the results of a pipeline
type PipelineResult struct {
	// Name the given name
	Name string `json:"name"`

	// Type is the user-specified type of the result.
	// The possible types are 'string', 'array', and 'object', with 'string' as the default.
	// 'array' and 'object' types are alpha features.
	Type ResultsType `json:"type,omitempty"`

	// Description is a human-readable description of the result
	// +optional
	Description string `json:"description"`

	// Value the expression used to retrieve the value
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ResultValue `json:"value"`
}
// PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask
type PipelineTaskMetadata struct {
	// Labels to attach to the embedded task.
	// +optional
	Labels map[string]string `json:"labels,omitempty"`

	// Annotations to attach to the embedded task.
	// +optional
	Annotations map[string]string `json:"annotations,omitempty"`
}
// EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.
type EmbeddedTask struct {
	// TypeMeta identifies a custom task when APIVersion/Kind are set.
	// +optional
	runtime.TypeMeta `json:",inline,omitempty"`

	// Spec is a specification of a custom task
	// +optional
	Spec runtime.RawExtension `json:"spec,omitempty"`

	// Metadata carries labels/annotations for the embedded task.
	// +optional
	Metadata PipelineTaskMetadata `json:"metadata,omitempty"`

	// TaskSpec is a specification of a task
	// +optional
	TaskSpec `json:",inline,omitempty"`
}
// PipelineTask defines a task in a Pipeline, passing inputs from both
// Params and from the output of previous tasks.
type PipelineTask struct {
	// Name is the name of this task within the context of a Pipeline. Name is
	// used as a coordinate with the `from` and `runAfter` fields to establish
	// the execution order of tasks relative to one another.
	Name string `json:"name,omitempty"`

	// DisplayName is the display name of this task within the context of a Pipeline.
	// This display name may be used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`

	// Description is the description of this task within the context of a Pipeline.
	// This description may be used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`

	// TaskRef is a reference to a task definition.
	// +optional
	TaskRef *TaskRef `json:"taskRef,omitempty"`

	// TaskSpec is a specification of a task
	// Specifying TaskSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Task.spec (API version: tekton.dev/v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	TaskSpec *EmbeddedTask `json:"taskSpec,omitempty"`

	// When is a list of when expressions that need to be true for the task to run
	// +optional
	When WhenExpressions `json:"when,omitempty"`

	// Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False
	// +optional
	Retries int `json:"retries,omitempty"`

	// RunAfter is the list of PipelineTask names that should be executed before
	// this Task executes. (Used to force a specific ordering in graph execution.)
	// +optional
	// +listType=atomic
	RunAfter []string `json:"runAfter,omitempty"`

	// Parameters declares parameters passed to this task.
	// +optional
	Params Params `json:"params,omitempty"`

	// Matrix declares parameters used to fan out this task.
	// +optional
	Matrix *Matrix `json:"matrix,omitempty"`

	// Workspaces maps workspaces from the pipeline spec to the workspaces
	// declared in the Task.
	// +optional
	// +listType=atomic
	Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"`

	// Duration after which the TaskRun times out. Defaults to 1 hour.
	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`

	// PipelineRef is a reference to a pipeline definition
	// Note: PipelineRef is in preview mode and not yet supported
	// +optional
	PipelineRef *PipelineRef `json:"pipelineRef,omitempty"`

	// PipelineSpec is a specification of a pipeline
	// Note: PipelineSpec is in preview mode and not yet supported
	// Specifying PipelineSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Pipeline.spec (API version: tekton.dev/v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`

	// OnError defines the exiting behavior of a PipelineRun on error
	// can be set to [ continue | stopAndFail ]
	// +optional
	OnError PipelineTaskOnErrorType `json:"onError,omitempty"`
}
// IsCustomTask checks whether an embedded TaskSpec is a Custom Task.
// Note that if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind` is set to `"Task"`,
// the reference will be considered a Custom Task - https://github.com/tektoncd/pipeline/issues/6457
func (et *EmbeddedTask) IsCustomTask() bool {
	if et == nil {
		return false
	}
	return et.APIVersion != "" && et.Kind != ""
}
// IsMatrixed return whether pipeline task is matrixed.
func (pt *PipelineTask) IsMatrixed() bool {
	matrix := pt.Matrix
	return matrix.HasParams() || matrix.HasInclude()
}
// TaskSpecMetadata returns the metadata of the PipelineTask's EmbeddedTask spec.
// Callers are expected to have checked that TaskSpec is non-nil.
func (pt *PipelineTask) TaskSpecMetadata() PipelineTaskMetadata {
	return pt.TaskSpec.Metadata
}
// HashKey is the name of the PipelineTask, and is used as the key for this PipelineTask in the DAG.
func (pt PipelineTask) HashKey() string {
	return pt.Name
}
// Deps returns all other PipelineTask dependencies of this PipelineTask, based on resource usage or ordering.
func (pt PipelineTask) Deps() []string {
	// A set guards against reporting the same dependency twice.
	depSet := sets.NewString()
	// Resource dependencies come from result references.
	for _, ref := range PipelineTaskResultRefs(&pt) {
		depSet.Insert(ref.PipelineTask)
	}
	// Ordering dependencies come from runAfter.
	depSet.Insert(pt.RunAfter...)
	return depSet.List()
}
// PipelineTaskList is a list of PipelineTasks
type PipelineTaskList []PipelineTask
// Deps returns a map with key as name of a pipelineTask and value as a list of its dependencies.
// Tasks with no dependencies are omitted from the map.
func (l PipelineTaskList) Deps() map[string][]string {
	deps := map[string][]string{}
	for _, pt := range l {
		if d := pt.Deps(); len(d) > 0 {
			deps[pt.HashKey()] = d
		}
	}
	return deps
}
// Items returns a slice of all tasks in the PipelineTaskList, converted to dag.Tasks.
func (l PipelineTaskList) Items() []dag.Task {
	tasks := make([]dag.Task, 0, len(l))
	for _, pt := range l {
		tasks = append(tasks, dag.Task(pt))
	}
	return tasks
}
// Names returns a set of pipeline task names from the given list of pipeline tasks.
func (l PipelineTaskList) Names() sets.String {
	names := sets.NewString()
	for _, pt := range l {
		names.Insert(pt.Name)
	}
	return names
}
// PipelineTaskParam is used to provide arbitrary string parameters to a Task.
type PipelineTaskParam struct {
	// Name is the parameter's name.
	Name string `json:"name"`
	// Value is the parameter's string value.
	Value string `json:"value"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PipelineList contains a list of Pipeline
type PipelineList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the collection of Pipelines in this list.
	Items []Pipeline `json:"items"`
}
// GetVarSubstitutionExpressions extracts all the value between "$(" and ")"" for a PipelineResult.
// The boolean reports whether any expression was found.
func (result PipelineResult) GetVarSubstitutionExpressions() ([]string, bool) {
	exprs := validateString(result.Value.StringVal)
	for _, elem := range result.Value.ArrayVal {
		exprs = append(exprs, validateString(elem)...)
	}
	for _, val := range result.Value.ObjectVal {
		exprs = append(exprs, validateString(val)...)
	}
	return exprs, len(exprs) != 0
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"slices"
"strings"
"github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
"github.com/tektoncd/pipeline/pkg/substitution"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
var (
	_ apis.Validatable = (*Pipeline)(nil)
	_ resourcesemantics.VerbLimited = (*Pipeline)(nil)
)

// Field-path names used when reporting validation errors.
const (
	taskRef = "taskRef"
	taskSpec = "taskSpec"
	pipelineRef = "pipelineRef"
	pipelineSpec = "pipelineSpec"
)
// SupportedVerbs returns the operations that validation should be called for.
func (p *Pipeline) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
}
// Validate checks that the Pipeline structure is valid but does not validate
// that any references resources exist, that is done at run time.
func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError {
	errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata")
	errs = errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
	// A standalone Pipeline (as opposed to one inlined in a PipelineRun) does
	// not support propagated parameters and workspaces, so everything it uses
	// must be declared.
	errs = errs.Also(p.Spec.validatePipelineParameterUsage(ctx).ViaField("spec"))
	return errs.Also(p.Spec.validatePipelineWorkspacesUsage().ViaField("spec"))
}
// Validate checks that taskNames in the Pipeline are valid and that the graph
// of Tasks expressed in the Pipeline makes sense.
// All checks below are accumulated; the returned FieldError aggregates every
// failure rather than stopping at the first one.
func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	errs = errs.Also(ps.ValidateBetaFields(ctx))
	// An entirely empty spec is rejected outright.
	if equality.Semantic.DeepEqual(ps, &PipelineSpec{}) {
		errs = errs.Also(apis.ErrGeneric("expected at least one, got none", "description", "params", "resources", "tasks", "workspaces"))
	}
	// PipelineTask must have a valid unique label and at least one of taskRef or taskSpec should be specified
	errs = errs.Also(ValidatePipelineTasks(ctx, ps.Tasks, ps.Finally))
	// Validate the pipeline task graph
	errs = errs.Also(validateGraph(ps.Tasks))
	// The parameter variables should be valid
	errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Tasks, ps.Params).ViaField("tasks"))
	errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Finally, ps.Params).ViaField("finally"))
	errs = errs.Also(validatePipelineContextVariables(ps.Tasks).ViaField("tasks"))
	errs = errs.Also(validatePipelineContextVariables(ps.Finally).ViaField("finally"))
	errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally))
	// Validate the pipeline's workspaces.
	errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces))
	// Validate the pipeline's results
	errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks, ps.Finally))
	errs = errs.Also(validateTasksAndFinallySection(ps))
	errs = errs.Also(validateFinalTasks(ps.Tasks, ps.Finally))
	errs = errs.Also(validateWhenExpressions(ctx, ps.Tasks, ps.Finally))
	errs = errs.Also(validateArtifactReference(ctx, ps.Tasks, ps.Finally))
	errs = errs.Also(validateMatrix(ctx, ps.Tasks).ViaField("tasks"))
	errs = errs.Also(validateMatrix(ctx, ps.Finally).ViaField("finally"))
	return errs
}
// ValidateBetaFields returns an error if the Pipeline spec uses beta features but does not
// have "enable-api-fields" set to "alpha" or "beta".
func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	for i, pt := range ps.Tasks {
		errs = errs.Also(pt.validateBetaFields(ctx).ViaFieldIndex("tasks", i))
	}
	for i, pt := range ps.Finally {
		// Fix: errors for finally tasks were previously attributed to the
		// "tasks" field path, producing misleading error messages.
		errs = errs.Also(pt.validateBetaFields(ctx).ViaFieldIndex("finally", i))
	}
	return errs
}
// validateBetaFields returns an error if the PipelineTask uses beta features but does not
// have "enable-api-fields" set to "alpha" or "beta".
func (pt *PipelineTask) validateBetaFields(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	if ref := pt.TaskRef; ref != nil {
		// Remote resolution is a beta feature.
		if ref.Resolver != "" {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "taskref.resolver", config.BetaAPIFields))
		}
		// Resolver params are likewise beta.
		if len(ref.Params) > 0 {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "taskref.params", config.BetaAPIFields))
		}
	}
	return errs
}
// ValidatePipelineTasks ensures that pipeline tasks has unique label, pipeline tasks has specified one of
// taskRef or taskSpec, and in case of a pipeline task with taskRef, it has a reference to a valid task (task name).
func ValidatePipelineTasks(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError {
	// The same name set is shared so names must be unique across both sections.
	seen := sets.NewString()
	errs := PipelineTaskList(tasks).Validate(ctx, seen, "tasks")
	return errs.Also(PipelineTaskList(finalTasks).Validate(ctx, seen, "finally"))
}
// Validate a list of pipeline tasks including custom task.
// taskNames accumulates every name seen so far (shared across tasks/finally)
// so duplicates can be rejected; path scopes error messages ("tasks"/"finally").
func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, path string) (errs *apis.FieldError) {
	for i, t := range l {
		// validate pipeline task name
		errs = errs.Also(t.ValidateName().ViaFieldIndex(path, i))
		// names cannot be duplicated - checking that pipelineTask names are unique.
		// Use the sets.String API rather than raw map indexing.
		if taskNames.Has(t.Name) {
			errs = errs.Also(apis.ErrMultipleOneOf("name").ViaFieldIndex(path, i))
		}
		taskNames.Insert(t.Name)
		// validate custom task, dag, or final task
		errs = errs.Also(t.Validate(ctx).ViaFieldIndex(path, i))
	}
	return errs
}
// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task.
func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, additionalParams []ParamSpec, path string) (errs *apis.FieldError) {
	for idx, task := range l {
		spec := task.TaskSpec
		if spec == nil {
			// Referenced (non-inline) tasks are validated on their own.
			continue
		}
		// Steps may use params declared either inline or at the pipeline level.
		declared := append(spec.Params, additionalParams...)
		errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, spec.Steps, declared).ViaFieldIndex(path, idx))
	}
	return errs
}
// ValidateName checks whether the PipelineTask's name is a valid DNS label
func (pt PipelineTask) ValidateName() *apis.FieldError {
	// Task names become Kubernetes object-name components, so they must be
	// valid RFC 1123 labels.
	if violations := validation.IsDNS1123Label(pt.Name); len(violations) > 0 {
		return &apis.FieldError{
			Message: fmt.Sprintf("invalid value %q", pt.Name),
			Paths:   []string{"name"},
			Details: "Pipeline Task name must be a valid DNS Label." +
				"For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
		}
	}
	return nil
}
// Validate classifies whether a task is a custom task or a regular task(dag/final)
// calls the validation routine based on the type of the task
func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) {
	errs = errs.Also(pt.validateRefOrSpec(ctx))
	errs = errs.Also(pt.validateEnabledInlineSpec(ctx))
	errs = errs.Also(pt.validateEmbeddedOrType())
	errs = errs.Also(pt.ValidateOnError(ctx))

	// A kind of "" or the namespaced Task kind marks an ordinary task; any
	// other kind — or an explicit apiVersion — classifies the pipeline task
	// as a custom task.
	isOrdinaryKind := func(k TaskKind) bool { return k == "" || k == NamespacedTaskKind }
	isCustom := false
	if pt.TaskRef != nil && (!isOrdinaryKind(pt.TaskRef.Kind) || pt.TaskRef.APIVersion != "") {
		isCustom = true
	}
	if pt.TaskSpec != nil && (!isOrdinaryKind(TaskKind(pt.TaskSpec.Kind)) || pt.TaskSpec.APIVersion != "") {
		isCustom = true
	}
	if isCustom {
		errs = errs.Also(pt.validateCustomTask())
	} else {
		errs = errs.Also(pt.validateTask(ctx))
	}
	return errs
}
// ValidateOnError validates the OnError field of a PipelineTask
func (pt PipelineTask) ValidateOnError(ctx context.Context) (errs *apis.FieldError) {
	// Param-reference values (e.g. $(params.x)) are resolved later, so an
	// unset or parameterized OnError needs no validation here.
	if pt.OnError == "" || isParamRefs(string(pt.OnError)) {
		return nil
	}
	// OnError is gated behind the beta API surface.
	errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "OnError", config.BetaAPIFields))
	// The two checks below are mutually exclusive by construction.
	switch {
	case pt.OnError != PipelineTaskContinue && pt.OnError != PipelineTaskStopAndFail:
		errs = errs.Also(apis.ErrInvalidValue(pt.OnError, "OnError", "PipelineTask OnError must be either \"continue\" or \"stopAndFail\""))
	case pt.OnError == PipelineTaskContinue && pt.Retries > 0:
		errs = errs.Also(apis.ErrGeneric("PipelineTask OnError cannot be set to \"continue\" when Retries is greater than 0"))
	}
	return errs
}
// validateMatrix validates this task's matrix: feature gating, fan-out
// limits, parameter uniqueness, and matrix/params exclusivity.
func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) {
	if pt.IsMatrixed() {
		// Matrix is a beta feature and is rejected when enable-api-fields
		// is set to "stable".
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "matrix", config.BetaAPIFields)).
			Also(pt.Matrix.validateCombinationsCount(ctx)).
			Also(pt.Matrix.validateUniqueParams())
	}
	// A parameter may appear under matrix or params, never both.
	return errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params))
}
// validateEmbeddedOrType rejects taskSpec blocks that carry type metadata
// (apiVersion/kind) alongside an embedded Task body.
func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) {
	// An embedded Task is recognized by the presence of steps.
	if pt.TaskSpec == nil || len(pt.TaskSpec.TaskSpec.Steps) == 0 {
		return nil
	}
	if pt.TaskSpec.APIVersion != "" {
		errs = errs.Also(&apis.FieldError{
			Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps",
			Paths:   []string{"taskSpec.apiVersion"},
		})
	}
	if pt.TaskSpec.Kind != "" {
		errs = errs.Also(&apis.FieldError{
			Message: "taskSpec.kind cannot be specified when using taskSpec.steps",
			Paths:   []string{"taskSpec.kind"},
		})
	}
	return errs
}
// validateWorkspaces checks each workspace binding on this task: binding
// names must be unique, and whichever name identifies the pipeline-level
// workspace must exist in workspaceNames.
func (pt *PipelineTask) validateWorkspaces(workspaceNames sets.String) (errs *apis.FieldError) {
	seen := sets.NewString()
	for idx, binding := range pt.Workspaces {
		if seen.Has(binding.Name) {
			errs = errs.Also(apis.ErrGeneric(
				fmt.Sprintf("workspace name %q must be unique", binding.Name), "").ViaFieldIndex("workspaces", idx))
		}
		// When binding.Workspace is empty, binding.Name doubles as the
		// pipeline workspace name; otherwise binding.Workspace is checked.
		switch {
		case binding.Workspace == "" && !workspaceNames.Has(binding.Name):
			errs = errs.Also(apis.ErrInvalidValue(
				fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, binding.Name),
				"",
			).ViaFieldIndex("workspaces", idx))
		case binding.Workspace != "" && !workspaceNames.Has(binding.Workspace):
			errs = errs.Also(apis.ErrInvalidValue(
				fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, binding.Workspace),
				"",
			).ViaFieldIndex("workspaces", idx))
		}
		seen.Insert(binding.Name)
	}
	return errs
}
// validateEnabledInlineSpec validates that pipelineSpec or taskSpec is allowed by checking
// disable-inline-spec field
func (pt PipelineTask) validateEnabledInlineSpec(ctx context.Context) (errs *apis.FieldError) {
	// disable-inline-spec is a comma-separated list of resource kinds for
	// which inline (embedded) specs are rejected; "pipeline" covers both
	// inline task and inline pipeline specs within a Pipeline.
	disabledKinds := strings.Split(config.FromContextOrDefaults(ctx).FeatureFlags.DisableInlineSpec, ",")
	inlineDisabled := slices.Contains(disabledKinds, "pipeline")
	if inlineDisabled && pt.TaskSpec != nil {
		errs = errs.Also(apis.ErrDisallowedFields("taskSpec"))
	}
	if inlineDisabled && pt.PipelineSpec != nil {
		errs = errs.Also(apis.ErrDisallowedFields("pipelineSpec"))
	}
	return errs
}
// validateRefOrSpec validates at least one of taskRef or taskSpec or pipelineRef or pipelineSpec is specified
func (pt PipelineTask) validateRefOrSpec(ctx context.Context) (errs *apis.FieldError) {
	// Record which of the mutually exclusive specification fields are set.
	specified := []string{}
	if pt.TaskRef != nil {
		specified = append(specified, taskRef)
	}
	if pt.TaskSpec != nil {
		specified = append(specified, taskSpec)
	}
	if pt.PipelineRef != nil {
		// Pipelines-in-pipelines is alpha-gated.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, pipelineRef, config.AlphaAPIFields))
		specified = append(specified, pipelineRef)
	}
	if pt.PipelineSpec != nil {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, pipelineSpec, config.AlphaAPIFields))
		specified = append(specified, pipelineSpec)
	}
	// Exactly one of the four fields must be present.
	switch len(specified) {
	case 1:
		// Valid: a single specification was provided.
	case 0:
		if config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == config.AlphaAPIFields {
			// Alpha mode surfaces all four options in the error message.
			errs = errs.Also(apis.ErrMissingOneOf(taskRef, taskSpec, pipelineRef, pipelineSpec))
		} else {
			// Beta/stable only advertise the task-based options.
			errs = errs.Also(apis.ErrMissingOneOf(taskRef, taskSpec))
		}
	default:
		errs = errs.Also(apis.ErrGeneric("expected exactly one, got multiple", specified...))
	}
	return errs
}
// validateCustomTask validates custom task specifications - checking kind and fail if not yet supported features specified
func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) {
	// A custom task must carry both kind and apiVersion on whichever of
	// taskRef/taskSpec it uses. requireField preserves the original error
	// ordering: ref.kind, spec.kind, ref.apiVersion, spec.apiVersion.
	requireField := func(missing bool, msg, path string) {
		if missing {
			errs = errs.Also(apis.ErrInvalidValue(msg, path))
		}
	}
	requireField(pt.TaskRef != nil && pt.TaskRef.Kind == "", "custom task ref must specify kind", "taskRef.kind")
	requireField(pt.TaskSpec != nil && pt.TaskSpec.Kind == "", "custom task spec must specify kind", "taskSpec.kind")
	requireField(pt.TaskRef != nil && pt.TaskRef.APIVersion == "", "custom task ref must specify apiVersion", "taskRef.apiVersion")
	requireField(pt.TaskSpec != nil && pt.TaskSpec.APIVersion == "", "custom task spec must specify apiVersion", "taskSpec.apiVersion")
	return errs
}
// validateTask validates a pipeline task or a final task for taskRef and taskSpec
func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) {
	// Delegate to whichever ref/spec fields are populated, scoping each
	// error to the corresponding field name.
	if spec := pt.TaskSpec; spec != nil {
		errs = errs.Also(spec.Validate(ctx).ViaField(taskSpec))
	}
	if ref := pt.TaskRef; ref != nil {
		errs = errs.Also(ref.Validate(ctx).ViaField(taskRef))
	}
	if ref := pt.PipelineRef; ref != nil {
		errs = errs.Also(ref.Validate(ctx).ViaField(pipelineRef))
	}
	if spec := pt.PipelineSpec; spec != nil {
		errs = errs.Also(spec.Validate(ctx).ViaField(pipelineSpec))
	}
	return errs
}
// validatePipelineWorkspacesDeclarations validates the specified workspaces, ensuring having unique name without any
// empty string,
func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) (errs *apis.FieldError) {
	// Workspace names must be non-empty and unique.
	seen := sets.NewString()
	for idx, ws := range wss {
		if ws.Name == "" {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("workspace %d has empty name", idx),
				"").ViaFieldIndex("workspaces", idx))
		}
		if seen.Has(ws.Name) {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("workspace with name %q appears more than once", ws.Name),
				"").ViaFieldIndex("workspaces", idx))
		}
		seen.Insert(ws.Name)
	}
	return errs
}
// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline
func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) {
	// First check params used inside inline task specs, then the params each
	// pipeline task passes — for dag tasks and finally tasks alike.
	errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "tasks")).
		Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "finally")).
		Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks")).
		Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally"))
	return errs
}
// validatePipelineTaskParameterUsage validates that parameters referenced in the Pipeline Tasks are declared by the Pipeline
func validatePipelineTaskParameterUsage(tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) {
	declaredNames := sets.NewString(params.GetNames()...)
	_, arrayParams, objectParams := params.SortByType()
	arrayNames := sets.NewString(arrayParams.GetNames()...)
	// Map each object param to the list of keys it declares, so key-level
	// references can be validated. Params with no properties get no entry.
	objectKeysByName := map[string][]string{}
	for _, objParam := range objectParams {
		if len(objParam.Properties) == 0 {
			continue
		}
		keys := make([]string, 0, len(objParam.Properties))
		for key := range objParam.Properties {
			keys = append(keys, key)
		}
		objectKeysByName[objParam.Name] = keys
	}
	errs = errs.Also(validatePipelineParametersVariables(tasks, "params", declaredNames, arrayNames, objectKeysByName))
	for idx := range tasks {
		errs = errs.Also(tasks[idx].Params.validateDuplicateParameters().ViaField("params").ViaIndex(idx))
	}
	return errs
}
// validatePipelineWorkspacesUsage validates that workspaces referenced in the Pipeline are declared by the Pipeline
func (ps *PipelineSpec) validatePipelineWorkspacesUsage() (errs *apis.FieldError) {
	// dag tasks and finally tasks are checked against the same declarations.
	errs = validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks").
		Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally"))
	return errs
}
// validatePipelineTasksWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in
// the pipeline
func validatePipelineTasksWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) {
	// Collect the declared workspace names once.
	declared := sets.NewString()
	for _, ws := range wss {
		declared.Insert(ws.Name)
	}
	// Any workspaces used in PipelineTasks should have their name declared
	// in the Pipeline's Workspaces list.
	for idx := range pts {
		errs = errs.Also(pts[idx].validateWorkspaces(declared).ViaIndex(idx))
	}
	return errs
}
// ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task,
// (1) it validates the type of parameter is either string or array (2) parameter default value matches
// with the type of that param (3) no duplication, feature flag and allowed param type when using param enum
func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) {
	// Declared pipeline params: type checks, duplicate names, enum gating.
	errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params")).
		Also(params.ValidateNoDuplicateNames()).
		Also(params.validateParamEnums(ctx).ViaField("params"))
	// Params supplied by each task must not repeat a name either.
	for idx := range tasks {
		errs = errs.Also(tasks[idx].Params.validateDuplicateParameters().ViaField("params").ViaIndex(idx))
	}
	return errs
}
// validatePipelineParametersVariables checks every $(<prefix>.*) reference in
// each task's params, matrix params, and when-expressions against the
// declared parameter names.
func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	for idx, task := range tasks {
		// Plain task params.
		errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx))
		// Matrix params, when the task fans out.
		if task.IsMatrixed() {
			errs = errs.Also(task.Matrix.validatePipelineParametersVariablesInMatrixParameters(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx))
		}
		// when-expression inputs and values.
		errs = errs.Also(task.When.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx))
	}
	return errs
}
// validatePipelineContextVariables checks that $(context.pipelineRun.*),
// $(context.pipeline.*), and $(context.pipelineTask.*) references in task
// param values only use the known context variable names.
func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError {
	pipelineRunContextNames := sets.NewString().Insert(
		"name",
		"namespace",
		"uid",
	)
	pipelineContextNames := sets.NewString().Insert(
		"name",
	)
	pipelineTaskContextNames := sets.NewString().Insert(
		"retries",
	)
	var paramValues []string
	for _, task := range tasks {
		// Accumulate values from every task. Plain assignment here would
		// discard earlier iterations and validate only the last task.
		paramValues = append(paramValues, task.extractAllParams().extractValues()...)
	}
	errs := validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineRun", pipelineRunContextNames).
		Also(validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipeline", pipelineContextNames)).
		Also(validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineTask", pipelineTaskContextNames))
	return errs
}
// extractAllParams extracts all the parameters in a PipelineTask:
// - pt.Params
// - pt.Matrix.Params
// - pt.Matrix.Include.Params
func (pt *PipelineTask) extractAllParams() Params {
	// Start from the task's own params and extend with matrix params.
	collected := pt.Params
	if pt.Matrix.HasParams() {
		collected = append(collected, pt.Matrix.Params...)
	}
	if pt.Matrix.HasInclude() {
		for _, inc := range pt.Matrix.Include {
			collected = append(collected, inc.Params...)
		}
	}
	return collected
}
// GetVarSubstitutionExpressions extract all values between the parameters "$(" and ")" of steps and sidecars
func (pt *PipelineTask) GetVarSubstitutionExpressions() []string {
	// Only inline task specs expose steps/sidecars to scan.
	if pt.TaskSpec == nil {
		return nil
	}
	var expressions []string
	for _, step := range pt.TaskSpec.Steps {
		expressions = append(expressions, step.GetVarSubstitutionExpressions()...)
	}
	for _, sidecar := range pt.TaskSpec.Sidecars {
		expressions = append(expressions, sidecar.GetVarSubstitutionExpressions()...)
	}
	return expressions
}
// containsExecutionStatusRef checks if a specified param has a reference to execution status or reason
// $(tasks.<task-name>.status), $(tasks.status), or $(tasks.<task-name>.reason)
func containsExecutionStatusRef(p string) bool {
	// A status/reason reference starts with "tasks." and ends in ".status"
	// or ".reason" (the aggregate "tasks.status" matches both conditions).
	return strings.HasPrefix(p, "tasks.") &&
		(strings.HasSuffix(p, ".status") || strings.HasSuffix(p, ".reason"))
}
// validateExecutionStatusVariables enforces the execution-status rules:
// dag tasks may not read any status; finally tasks may only read the status
// of dag tasks defined in the pipeline.
func validateExecutionStatusVariables(tasks []PipelineTask, finallyTasks []PipelineTask) (errs *apis.FieldError) {
	dagErrs := validateExecutionStatusVariablesInTasks(tasks).ViaField("tasks")
	finallyErrs := validateExecutionStatusVariablesInFinally(PipelineTaskList(tasks).Names(), finallyTasks).ViaField("finally")
	return dagErrs.Also(finallyErrs)
}
// validate dag pipeline tasks, task params can not access execution status of any other task
// dag tasks cannot have param value as $(tasks.pipelineTask.status)
func validateExecutionStatusVariablesInTasks(tasks []PipelineTask) (errs *apis.FieldError) {
	for idx := range tasks {
		errs = errs.Also(tasks[idx].validateExecutionStatusVariablesDisallowed().ViaIndex(idx))
	}
	return errs
}
// validate finally tasks accessing execution status of a dag task specified in the pipeline
// $(tasks.pipelineTask.status) is invalid if pipelineTask is not defined as a dag task
func validateExecutionStatusVariablesInFinally(tasksNames sets.String, finally []PipelineTask) (errs *apis.FieldError) {
	for idx := range finally {
		errs = errs.Also(finally[idx].validateExecutionStatusVariablesAllowed(tasksNames).ViaIndex(idx))
	}
	return errs
}
// validateExecutionStatusVariablesDisallowed rejects any execution-status
// reference in this (dag) task's params or when-expressions.
func (pt *PipelineTask) validateExecutionStatusVariablesDisallowed() (errs *apis.FieldError) {
	for _, param := range pt.Params {
		expressions, ok := param.GetVarSubstitutionExpressions()
		if !ok {
			continue
		}
		errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "value").
			ViaFieldKey("params", param.Name))
	}
	for idx, expr := range pt.When {
		expressions, ok := expr.GetVarSubstitutionExpressions()
		if !ok {
			continue
		}
		errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "").
			ViaFieldIndex("when", idx))
	}
	return errs
}
// validateExecutionStatusVariablesAllowed checks that execution-status
// references in this (finally) task's params and when-expressions only name
// dag tasks that exist in the pipeline.
func (pt *PipelineTask) validateExecutionStatusVariablesAllowed(ptNames sets.String) (errs *apis.FieldError) {
	for _, param := range pt.Params {
		expressions, ok := param.GetVarSubstitutionExpressions()
		if !ok {
			continue
		}
		errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "value").
			ViaFieldKey("params", param.Name))
	}
	for idx, expr := range pt.When {
		expressions, ok := expr.GetVarSubstitutionExpressions()
		if !ok {
			continue
		}
		errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "").
			ViaFieldIndex("when", idx))
	}
	return errs
}
// validateContainsExecutionStatusVariablesDisallowed produces an error when
// any of the expressions references task execution status.
func validateContainsExecutionStatusVariablesDisallowed(expressions []string, path string) (errs *apis.FieldError) {
	if !containsExecutionStatusReferences(expressions) {
		return nil
	}
	return errs.Also(apis.ErrInvalidValue("pipeline tasks can not refer to execution status"+
		" of any other pipeline task or aggregate status of tasks", path))
}
// containsExecutionStatusReferences reports whether any expression reads a
// task's execution status ($(tasks.<name>.status)/$(tasks.<name>.reason)) or
// the aggregate $(tasks.status).
func containsExecutionStatusReferences(expressions []string) bool {
	// Result references ($(tasks.x.results.y)) also start with "tasks.", so
	// lists that look like result refs are excluded from this check.
	if LooksLikeContainsResultRefs(expressions) {
		return false
	}
	return slices.ContainsFunc(expressions, containsExecutionStatusRef)
}
// validateExecutionStatusVariablesExpressions checks that each status/reason
// reference names a dag task present in ptNames.
func validateExecutionStatusVariablesExpressions(expressions []string, ptNames sets.String, fieldPath string) (errs *apis.FieldError) {
	// Result references are validated elsewhere.
	if LooksLikeContainsResultRefs(expressions) {
		return nil
	}
	for _, expression := range expressions {
		// $(tasks.status) aggregates over all dag tasks; no task name to check.
		if expression == PipelineTasksAggregateStatus {
			continue
		}
		if !containsExecutionStatusRef(expression) {
			continue
		}
		// Strip "tasks." and the ".status"/".reason" suffix to recover the
		// referenced task name.
		name := strings.TrimPrefix(expression, "tasks.")
		if strings.HasSuffix(name, ".status") {
			name = strings.TrimSuffix(name, ".status")
		} else {
			name = strings.TrimSuffix(name, ".reason")
		}
		// Report an error if the task name is not a dag task in the pipeline.
		if !ptNames.Has(name) {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", name), fieldPath))
		}
	}
	return errs
}
// validatePipelineContextVariablesInParamValues checks each param value so it
// references only known context variables under the given prefix.
func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) {
	for _, value := range paramValues {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, prefix, contextNames).ViaField("value"))
	}
	return errs
}
// filter returns the elements of arr for which cond is true, preserving
// order. The result is always non-nil, even when nothing matches.
func filter(arr []string, cond func(string) bool) []string {
	kept := []string{}
	for _, s := range arr {
		if cond(s) {
			kept = append(kept, s)
		}
	}
	return kept
}
// validatePipelineResults ensure that pipeline result variables are properly configured
func validatePipelineResults(results []PipelineResult, tasks []PipelineTask, finally []PipelineTask) (errs *apis.FieldError) {
	taskNames := getPipelineTasksNames(tasks)
	finallyNames := getPipelineTasksNames(finally)
	for idx, result := range results {
		expressions, ok := result.GetVarSubstitutionExpressions()
		// A pipeline result must contain at least one substitution expression...
		if !ok {
			errs = errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but no expressions were found",
				"value").ViaFieldIndex("results", idx))
		}
		// ...and those expressions must look like task result references.
		if !LooksLikeContainsResultRefs(expressions) {
			errs = errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but an invalid expressions was found",
				"value").ViaFieldIndex("results", idx))
		}
		// Every result-shaped expression must actually parse into a ResultRef.
		expressions = filter(expressions, resultref.LooksLikeResultRef)
		refs := NewResultRefs(expressions)
		if len(expressions) != len(refs) {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, refs),
				"value").ViaFieldIndex("results", idx))
		}
		// The producing task must exist in the pipeline (tasks or finally).
		if !taskContainsResult(result.Value.StringVal, taskNames, finallyNames) {
			errs = errs.Also(apis.ErrInvalidValue("referencing a nonexistent task",
				"value").ViaFieldIndex("results", idx))
		}
	}
	return errs
}
// put task names in a set
func getPipelineTasksNames(pipelineTasks []PipelineTask) sets.String {
	names := sets.NewString()
	for _, task := range pipelineTasks {
		names.Insert(task.Name)
	}
	return names
}
// taskContainsResult ensures the result value is referenced within the
// task names
func taskContainsResult(resultExpression string, pipelineTaskNames sets.String, pipelineFinallyTaskNames sets.String) bool {
	// A single result value may embed several $(...) expressions, e.g.
	// "$(tasks.a.results.x) - $(tasks.b.results.y)"; split on "$" so each
	// expression is checked independently.
	for _, chunk := range strings.Split(resultExpression, "$") {
		if chunk == "" {
			continue
		}
		value := stripVarSubExpression("$" + chunk)
		ref, err := resultref.ParseTaskExpression(value)
		if err != nil {
			return false
		}
		// "tasks."-prefixed references must name a dag task; "finally."-
		// prefixed ones must name a finally task.
		if strings.HasPrefix(value, "tasks") && !pipelineTaskNames.Has(ref.ResourceName) {
			return false
		}
		if strings.HasPrefix(value, "finally") && !pipelineFinallyTaskNames.Has(ref.ResourceName) {
			return false
		}
	}
	return true
}
// validateTasksAndFinallySection rejects pipelines that declare finally
// tasks without any ordinary task for them to follow.
func validateTasksAndFinallySection(ps *PipelineSpec) *apis.FieldError {
	if len(ps.Tasks) > 0 || len(ps.Finally) == 0 {
		return nil
	}
	return apis.ErrInvalidValue(fmt.Sprintf("spec.tasks is empty but spec.finally has %d tasks", len(ps.Finally)), "finally")
}
// validateFinalTasks checks finally-specific constraints: no runAfter, and
// result references restricted to dag tasks.
func validateFinalTasks(tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
	// finally tasks all run after the dag completes, so runAfter is
	// meaningless (and disallowed) there.
	for idx, ft := range finalTasks {
		if len(ft.RunAfter) > 0 {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("no runAfter allowed under spec.finally, final task %s has runAfter specified", ft.Name), "").ViaFieldIndex("finally", idx))
		}
	}
	dagNames := PipelineTaskList(tasks).Names()
	finallyNames := PipelineTaskList(finalTasks).Names()
	return errs.Also(validateTaskResultReferenceInFinallyTasks(finalTasks, dagNames, finallyNames))
}
// validateTaskResultReferenceInFinallyTasks validates the result references
// appearing in each finally task's params and when-expressions.
func validateTaskResultReferenceInFinallyTasks(finalTasks []PipelineTask, ts sets.String, fts sets.String) (errs *apis.FieldError) {
	for idx, ft := range finalTasks {
		// Result references in param values...
		for _, param := range ft.Params {
			if expressions, ok := param.GetVarSubstitutionExpressions(); ok {
				errs = errs.Also(validateResultsVariablesExpressionsInFinally(expressions, ts, fts, "value").ViaFieldKey(
					"params", param.Name).ViaFieldIndex("finally", idx))
			}
		}
		// ...and in when-expressions.
		for i, we := range ft.When {
			if expressions, ok := we.GetVarSubstitutionExpressions(); ok {
				errs = errs.Also(validateResultsVariablesExpressionsInFinally(expressions, ts, fts, "").ViaFieldIndex(
					"when", i).ViaFieldIndex("finally", idx))
			}
		}
	}
	return errs
}
// validateResultsVariablesExpressionsInFinally ensures finally tasks only
// consume results produced by dag tasks defined in the pipeline.
func validateResultsVariablesExpressionsInFinally(expressions []string, pipelineTasksNames sets.String, finalTasksNames sets.String, fieldPath string) (errs *apis.FieldError) {
	if !LooksLikeContainsResultRefs(expressions) {
		return nil
	}
	for _, ref := range NewResultRefs(expressions) {
		pt := ref.PipelineTask
		switch {
		case finalTasksNames.Has(pt):
			// finally tasks run concurrently; one may not consume another's result.
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid task result reference, "+
				"final task has task result reference from a final task %s", pt), fieldPath))
		case !pipelineTasksNames.Has(pt):
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid task result reference, "+
				"final task has task result reference from a task %s which is not defined in the pipeline", pt), fieldPath))
		}
	}
	return errs
}
// validateWhenExpressions validates the when-expressions of every dag and
// finally task; only the error path differs between the two lists.
func validateWhenExpressions(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
	for idx := range tasks {
		errs = errs.Also(tasks[idx].When.validate(ctx).ViaFieldIndex("tasks", idx))
	}
	for idx := range finalTasks {
		errs = errs.Also(finalTasks[idx].When.validate(ctx).ViaFieldIndex("finally", idx))
	}
	return errs
}
// validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency
// cycle or that they rely on values from Tasks that ran previously.
func validateGraph(tasks []PipelineTask) (errs *apis.FieldError) {
	taskList := PipelineTaskList(tasks)
	// dag.Build returns an error for cycles and unresolvable dependencies.
	if _, err := dag.Build(taskList, taskList.Deps()); err != nil {
		errs = errs.Also(apis.ErrInvalidValue(err.Error(), "tasks"))
	}
	return errs
}
// validateMatrix runs per-task matrix validation, then the cross-task rule
// that matrix results must be consumed in aggregate.
func validateMatrix(ctx context.Context, tasks []PipelineTask) (errs *apis.FieldError) {
	for idx := range tasks {
		errs = errs.Also(tasks[idx].validateMatrix(ctx).ViaIndex(idx))
	}
	return errs.Also(validateTaskResultsFromMatrixedPipelineTasksConsumed(tasks))
}
// findAndValidateResultRefsForMatrix checks that any result references to Matrixed PipelineTasks if consumed
// by another PipelineTask that the entire array of results produced by a matrix is consumed in aggregate
// since consuming a singular result produced by a matrix is currently not supported
func findAndValidateResultRefsForMatrix(tasks []PipelineTask, taskMapping map[string]PipelineTask) (resultRefs []*ResultRef, errs *apis.FieldError) {
	for _, t := range tasks {
		for _, p := range t.Params {
			// Only param values containing $(...) substitutions can hold result refs.
			if expressions, ok := p.GetVarSubstitutionExpressions(); ok {
				if LooksLikeContainsResultRefs(expressions) {
					// NOTE(review): resultRefs is overwritten — not appended to — on each
					// matching param, so only the refs from the last matching param are
					// returned to the caller. Confirm this is intended.
					resultRefs, errs = validateMatrixedPipelineTaskConsumed(expressions, taskMapping)
					if errs != nil {
						// Fail fast on the first invalid matrix consumption.
						return nil, errs
					}
				}
			}
		}
	}
	return resultRefs, errs
}
// validateMatrixedPipelineTaskConsumed checks that any Matrixed Pipeline Task that the is being consumed is consumed in
// aggregate [*] since consuming a singular result produced by a matrix is currently not supported
func validateMatrixedPipelineTaskConsumed(expressions []string, taskMapping map[string]PipelineTask) (resultRefs []*ResultRef, errs *apis.FieldError) {
	var matrixRefExpressions []string
	for _, expr := range expressions {
		// Skip anything that is not a result-reference expression.
		if !resultref.LooksLikeResultRef(expr) {
			continue
		}
		// Result refs have the shape "tasks.<pipelineTaskName>.results.<resultName>[*]",
		// so the second dot-separated segment is the producing task's name.
		producer := strings.Split(expr, ".")[1]
		if !taskMapping[producer].IsMatrixed() {
			continue
		}
		// Matrix results may only be consumed in aggregate via the [*] notation.
		if !strings.HasSuffix(expr, "[*]") {
			errs = errs.Also(apis.ErrGeneric("A matrixed pipelineTask can only be consumed in aggregate using [*] notation, but is currently set to " + expr))
		}
		matrixRefExpressions = append(matrixRefExpressions, expr)
	}
	return NewResultRefs(matrixRefExpressions), errs
}
// validateTaskResultsFromMatrixedPipelineTasksConsumed checks that any Matrixed Pipeline Task that the is being consumed
// is consumed in aggregate [*] since consuming a singular result produced by a matrix is currently not supported.
// It also validates that a matrix emitting results can only emit results with the underlying type string
// if those results are being consumed by another PipelineTask.
func validateTaskResultsFromMatrixedPipelineTasksConsumed(tasks []PipelineTask) (errs *apis.FieldError) {
	taskMapping := createTaskMapping(tasks)
	// First pass: enforce aggregate ([*]) consumption; bail on violation.
	resultRefs, errs := findAndValidateResultRefsForMatrix(tasks, taskMapping)
	if errs != nil {
		return errs
	}
	// Second pass: consumed matrix results must have string type.
	return errs.Also(validateMatrixEmittingStringResults(resultRefs, taskMapping))
}
// createTaskMapping maps the PipelineTaskName to the PipelineTask to easily access
// the pipelineTask by Name.
// The previous signature declared a named result (taskMap) that was never
// used; it is removed to avoid confusion, and the map is pre-sized.
func createTaskMapping(tasks []PipelineTask) map[string]PipelineTask {
	taskMapping := make(map[string]PipelineTask, len(tasks))
	for _, task := range tasks {
		taskMapping[task.Name] = task
	}
	return taskMapping
}
// validateMatrixEmittingStringResults checks a matrix emitting results can only emit results with the underlying type string
// if those results are being consumed by another PipelineTask. Note: It is not possible to validate remote tasks
func validateMatrixEmittingStringResults(resultRefs []*ResultRef, taskMapping map[string]PipelineTask) (errs *apis.FieldError) {
	for _, resultRef := range resultRefs {
		// The producing pipeline task and the specific result being consumed.
		task := taskMapping[resultRef.PipelineTask]
		resultName := resultRef.Result
		if task.TaskRef != nil {
			// NOTE(review): taskMapping is keyed by pipeline task name, but this
			// lookup uses the referenced Task's name (task.TaskRef.Name); it only
			// hits when a pipeline task happens to share that name. Confirm this
			// is the intended behavior.
			referencedTask := taskMapping[task.TaskRef.Name]
			if referencedTask.TaskSpec != nil {
				errs = errs.Also(validateStringResults(referencedTask.TaskSpec.Results, resultName))
			}
		} else if task.TaskSpec != nil {
			// Inline task spec: validate its declared results directly.
			errs = errs.Also(validateStringResults(task.TaskSpec.Results, resultName))
		}
	}
	return errs
}
// validateStringResults ensure that the result type is string
func validateStringResults(results []TaskResult, resultName string) (errs *apis.FieldError) {
	for _, res := range results {
		if res.Name != resultName {
			continue
		}
		// Matrix-produced results consumed elsewhere must be plain strings.
		if res.Type != ResultsTypeString {
			errs = errs.Also(apis.ErrInvalidValue(
				fmt.Sprintf("Matrixed PipelineTasks emitting results must have an underlying type string, but result %s has type %s in pipelineTask", resultName, string(res.Type)),
				"",
			))
		}
	}
	return errs
}
// validateArtifactReference ensure that the feature flag enableArtifacts is set to true when using artifacts
func validateArtifactReference(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
	// No restriction when the artifacts feature is enabled.
	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
		return errs
	}
	// refersToArtifacts reports whether any param value of t references a task artifact.
	refersToArtifacts := func(t PipelineTask) bool {
		for _, v := range t.Params.extractValues() {
			// MatchString avoids materializing every submatch just to test for a hit.
			if artifactref.TaskArtifactRegex.MatchString(v) {
				return true
			}
		}
		return false
	}
	// As before, the first offending task short-circuits validation.
	for i, t := range tasks {
		if refersToArtifacts(t) {
			return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "").ViaField("params").ViaFieldIndex("tasks", i))
		}
	}
	for i, t := range finalTasks {
		if refersToArtifacts(t) {
			return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "").ViaField("params").ViaFieldIndex("finally", i))
		}
	}
	return errs
}
// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters
// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks.
// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])",
// this would be one of the strings returned.
func (ps *PipelineSpec) GetIndexingReferencesToArrayParams() sets.String {
	// First collect every string that could contain a param reference...
	paramsRefs := []string{}
	for i := range ps.Tasks {
		paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...)
		if ps.Tasks[i].IsMatrixed() {
			paramsRefs = append(paramsRefs, ps.Tasks[i].Matrix.Params.extractValues()...)
		}
		// Workspace SubPaths may embed param references too.
		for j := range ps.Tasks[i].Workspaces {
			paramsRefs = append(paramsRefs, ps.Tasks[i].Workspaces[j].SubPath)
		}
		for _, wes := range ps.Tasks[i].When {
			paramsRefs = append(paramsRefs, wes.Input)
			paramsRefs = append(paramsRefs, wes.Values...)
		}
	}
	for i := range ps.Finally {
		paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...)
		if ps.Finally[i].IsMatrixed() {
			paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...)
		}
		// NOTE(review): unlike the Tasks loop above, Finally workspaces' SubPath
		// values are not scanned here — confirm whether that asymmetry is intended.
		for _, wes := range ps.Finally[i].When {
			paramsRefs = append(paramsRefs, wes.Input)
			paramsRefs = append(paramsRefs, wes.Values...)
		}
	}
	// extract all array indexing references, for example []{"$(params.array-params[1])"}
	arrayIndexParamRefs := []string{}
	for _, p := range paramsRefs {
		arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...)
	}
	return sets.NewString(arrayIndexParamRefs...)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"knative.dev/pkg/apis"
)
// Validate ensures that a supplied PipelineRef field is populated
// correctly. No errors are returned for a nil PipelineRef.
func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) {
	if ref != nil {
		errs = validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
	}
	return errs
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"knative.dev/pkg/apis"
)
var _ apis.Convertible = (*PipelineRun)(nil)
// ConvertTo implements apis.Convertible. v1 is the highest known version,
// so every conversion target is rejected; deletion contexts are a no-op.
func (pr *PipelineRun) ConvertTo(ctx context.Context, sink apis.Convertible) error {
	if !apis.IsInDelete(ctx) {
		return fmt.Errorf("v1 is the highest known version, got: %T", sink)
	}
	return nil
}
// ConvertFrom implements apis.Convertible. v1 is the highest known version,
// so every conversion source is rejected; deletion contexts are a no-op.
func (pr *PipelineRun) ConvertFrom(ctx context.Context, source apis.Convertible) error {
	if !apis.IsInDelete(ctx) {
		return fmt.Errorf("v1 is the highest known version, got: %T", source)
	}
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"regexp"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmap"
)
var (
	// Compile-time assertion that *PipelineRun implements apis.Defaultable.
	_ apis.Defaultable = (*PipelineRun)(nil)
	// filterReservedAnnotationRegexp matches annotation keys in Tekton's
	// reserved namespace; used when filtering annotations at creation time.
	filterReservedAnnotationRegexp = regexp.MustCompile(pipeline.TektonReservedAnnotationExpr)
)
// SetDefaults implements apis.Defaultable
func (pr *PipelineRun) SetDefaults(ctx context.Context) {
	pr.Spec.SetDefaults(ctx)
	// Silently filtering out Tekton Reserved annotations at creation
	// NOTE(review): the predicate returns true for keys matching the reserved
	// pattern; presumably kmap.Filter removes entries for which the predicate
	// is true — confirm against knative.dev/pkg/kmap.
	if apis.IsInCreate(ctx) {
		pr.ObjectMeta.Annotations = kmap.Filter(pr.ObjectMeta.Annotations, func(s string) bool {
			return filterReservedAnnotationRegexp.MatchString(s)
		})
	}
}
// SetDefaults implements apis.Defaultable
func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) {
	cfg := config.FromContextOrDefaults(ctx)
	// A ref with neither a name nor a resolver gets the cluster's default resolver.
	if prs.PipelineRef != nil && prs.PipelineRef.Name == "" && prs.PipelineRef.Resolver == "" {
		prs.PipelineRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType)
	}
	// Always materialize Timeouts so the pipeline timeout default can be applied.
	if prs.Timeouts == nil {
		prs.Timeouts = &TimeoutFields{}
	}
	if prs.Timeouts.Pipeline == nil {
		prs.Timeouts.Pipeline = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute}
	}
	// Fill in the service account only when the user left it empty and a
	// cluster default exists.
	defaultSA := cfg.Defaults.DefaultServiceAccount
	if prs.TaskRunTemplate.ServiceAccountName == "" && defaultSA != "" {
		prs.TaskRunTemplate.ServiceAccountName = defaultSA
	}
	// Merge the user-provided pod template over the cluster default.
	defaultPodTemplate := cfg.Defaults.DefaultPodTemplate
	prs.TaskRunTemplate.PodTemplate = pod.MergePodTemplateWithDefault(prs.TaskRunTemplate.PodTemplate, defaultPodTemplate)
	// Inline pipeline specs get their own defaults applied recursively.
	if prs.PipelineSpec != nil {
		prs.PipelineSpec.SetDefaults(ctx)
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// +genclient
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PipelineRun represents a single execution of a Pipeline. PipelineRuns are how
// the graph of Tasks declared in a Pipeline are executed; they specify inputs
// to Pipelines such as parameter values and capture operational aspects of the
// Tasks execution such as service account and tolerations. Creating a
// PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.
//
// +k8s:openapi-gen=true
// +kubebuilder:storageversion
type PipelineRun struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object metadata.
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the desired state of the PipelineRun as specified by the user.
	// +optional
	Spec PipelineRunSpec `json:"spec,omitempty"`
	// Status communicates the observed state of the PipelineRun.
	// +optional
	Status PipelineRunStatus `json:"status,omitempty"`
}
// GetName returns the name of the PipelineRun.
func (pr *PipelineRun) GetName() string {
	return pr.ObjectMeta.Name
}
// GetStatusCondition returns the PipelineRun status as a ConditionAccessor.
func (pr *PipelineRun) GetStatusCondition() apis.ConditionAccessor {
	status := &pr.Status
	return status
}
// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*PipelineRun) GetGroupVersionKind() schema.GroupVersionKind {
	gvk := SchemeGroupVersion.WithKind(pipeline.PipelineRunControllerName)
	return gvk
}
// IsDone returns true if the PipelineRun's status indicates that it is done.
func (pr *PipelineRun) IsDone() bool {
	succeeded := pr.Status.GetCondition(apis.ConditionSucceeded)
	return !succeeded.IsUnknown()
}
// HasStarted reports whether the PipelineRun has a valid (non-nil, non-zero)
// start time set in its status.
func (pr *PipelineRun) HasStarted() bool {
	start := pr.Status.StartTime
	return start != nil && !start.IsZero()
}
// IsSuccessful returns true if the PipelineRun's status indicates that it has succeeded.
func (pr *PipelineRun) IsSuccessful() bool {
	if pr == nil {
		return false
	}
	return pr.Status.GetCondition(apis.ConditionSucceeded).IsTrue()
}
// IsFailure returns true if the PipelineRun's status indicates that it has failed.
func (pr *PipelineRun) IsFailure() bool {
	if pr == nil {
		return false
	}
	return pr.Status.GetCondition(apis.ConditionSucceeded).IsFalse()
}
// IsCancelled returns true if the PipelineRun's spec status is set to Cancelled state.
func (pr *PipelineRun) IsCancelled() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusCancelled
}
// IsGracefullyCancelled returns true if the PipelineRun's spec status is set to CancelledRunFinally state.
func (pr *PipelineRun) IsGracefullyCancelled() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusCancelledRunFinally
}
// IsGracefullyStopped returns true if the PipelineRun's spec status is set to StoppedRunFinally state.
func (pr *PipelineRun) IsGracefullyStopped() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusStoppedRunFinally
}
// PipelineTimeout returns the applicable timeout for the PipelineRun: the
// explicit spec.timeouts.pipeline value when set, otherwise the cluster's
// default timeout in minutes.
func (pr *PipelineRun) PipelineTimeout(ctx context.Context) time.Duration {
	if t := pr.Spec.Timeouts; t != nil && t.Pipeline != nil {
		return t.Pipeline.Duration
	}
	defaultMinutes := config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes
	return time.Duration(defaultMinutes) * time.Minute
}
// TasksTimeout returns the tasks timeout for the PipelineRun, if set,
// or the tasks timeout computed from the Pipeline and Finally timeouts, if those are set.
func (pr *PipelineRun) TasksTimeout() *metav1.Duration {
	t := pr.Spec.Timeouts
	switch {
	case t == nil:
		return nil
	case t.Tasks != nil:
		// Explicit tasks timeout wins.
		return t.Tasks
	case t.Pipeline == nil || t.Finally == nil:
		// Cannot derive a tasks timeout without both pipeline and finally.
		return nil
	case t.Pipeline.Duration == config.NoTimeoutDuration || t.Finally.Duration == config.NoTimeoutDuration:
		// "No timeout" on either side makes the subtraction meaningless.
		return nil
	default:
		return &metav1.Duration{Duration: (t.Pipeline.Duration - t.Finally.Duration)}
	}
}
// FinallyTimeout returns the finally timeout for the PipelineRun, if set,
// or the finally timeout computed from the Pipeline and Tasks timeouts, if those are set.
func (pr *PipelineRun) FinallyTimeout() *metav1.Duration {
	t := pr.Spec.Timeouts
	switch {
	case t == nil:
		return nil
	case t.Finally != nil:
		// Explicit finally timeout wins.
		return t.Finally
	case t.Pipeline == nil || t.Tasks == nil:
		// Cannot derive a finally timeout without both pipeline and tasks.
		return nil
	case t.Pipeline.Duration == config.NoTimeoutDuration || t.Tasks.Duration == config.NoTimeoutDuration:
		// "No timeout" on either side makes the subtraction meaningless.
		return nil
	default:
		return &metav1.Duration{Duration: (t.Pipeline.Duration - t.Tasks.Duration)}
	}
}
// IsPending returns true if the PipelineRun's spec status is set to Pending state.
func (pr *PipelineRun) IsPending() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusPending
}
// GetNamespacedName returns a k8s namespaced name that identifies this PipelineRun.
func (pr *PipelineRun) GetNamespacedName() types.NamespacedName {
	return types.NamespacedName{
		Namespace: pr.Namespace,
		Name:      pr.Name,
	}
}
// IsTimeoutConditionSet returns true when the PipelineRun's "Succeeded"
// condition is False with the timed-out reason.
func (pr *PipelineRun) IsTimeoutConditionSet() bool {
	cond := pr.Status.GetCondition(apis.ConditionSucceeded)
	if !cond.IsFalse() {
		return false
	}
	return cond.Reason == PipelineRunReasonTimedOut.String()
}
// SetTimeoutCondition sets the status of the PipelineRun to timed out.
func (pr *PipelineRun) SetTimeoutCondition(ctx context.Context) {
	message := fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.PipelineTimeout(ctx).String())
	cond := apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionFalse,
		Reason:  PipelineRunReasonTimedOut.String(),
		Message: message,
	}
	pr.Status.SetCondition(&cond)
}
// HasTimedOut returns true if the PipelineRun has exceeded its spec.Timeout
// based on its status.StartTime. A NoTimeoutDuration timeout never expires.
func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	start := pr.Status.StartTime
	if start.IsZero() {
		return false
	}
	timeout := pr.PipelineTimeout(ctx)
	if timeout == config.NoTimeoutDuration {
		return false
	}
	return c.Since(start.Time) > timeout
}
// HasTimedOutForALongTime returns true if a pipelinerun has exceeded its spec.Timeout
// based on its status.StartTime by a large margin.
func (pr *PipelineRun) HasTimedOutForALongTime(ctx context.Context, c clock.PassiveClock) bool {
	if !pr.HasTimedOut(ctx, c) {
		return false
	}
	// We are arbitrarily defining large margin as doubling the spec.timeout
	elapsed := c.Since(pr.Status.StartTime.Time)
	return elapsed >= 2*pr.PipelineTimeout(ctx)
}
// HaveTasksTimedOut returns true if the PipelineRun has exceeded its
// spec.Timeouts.Tasks. A NoTimeoutDuration tasks timeout never expires.
func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	timeout := pr.TasksTimeout()
	start := pr.Status.StartTime
	if start.IsZero() || timeout == nil {
		return false
	}
	if timeout.Duration == config.NoTimeoutDuration {
		return false
	}
	return c.Since(start.Time) > timeout.Duration
}
// HasFinallyTimedOut returns true if the PipelineRun has exceeded its
// spec.Timeouts.Finally, measured from status.FinallyStartTime.
// A NoTimeoutDuration finally timeout never expires.
func (pr *PipelineRun) HasFinallyTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	timeout := pr.FinallyTimeout()
	start := pr.Status.FinallyStartTime
	if start == nil || start.IsZero() || timeout == nil {
		return false
	}
	if timeout.Duration == config.NoTimeoutDuration {
		return false
	}
	return c.Since(start.Time) > timeout.Duration
}
// HasVolumeClaimTemplate returns true if the PipelineRun contains any
// workspace binding with a volumeClaimTemplate, which is used for creating
// PersistentVolumeClaims with an OwnerReference for each run.
func (pr *PipelineRun) HasVolumeClaimTemplate() bool {
	for i := range pr.Spec.Workspaces {
		if pr.Spec.Workspaces[i].VolumeClaimTemplate != nil {
			return true
		}
	}
	return false
}
// PipelineRunSpec defines the desired state of PipelineRun
type PipelineRunSpec struct {
	// PipelineRef is a reference to the Pipeline that should be run.
	// +optional
	PipelineRef *PipelineRef `json:"pipelineRef,omitempty"`
	// PipelineSpec is an inline pipeline specification.
	// Specifying PipelineSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Pipeline.spec (API version: tekton.dev/v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`
	// Params is a list of parameter names and values.
	Params Params `json:"params,omitempty"`
	// Status is used for cancelling a pipelinerun (and maybe more later on)
	// +optional
	Status PipelineRunSpecStatus `json:"status,omitempty"`
	// Timeouts is the time after which the Pipeline times out.
	// Currently three keys are accepted in the map
	// pipeline, tasks and finally
	// with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally
	// +optional
	Timeouts *TimeoutFields `json:"timeouts,omitempty"`
	// TaskRunTemplate represents the template of taskrun
	// +optional
	TaskRunTemplate PipelineTaskRunTemplate `json:"taskRunTemplate,omitempty"`
	// Workspaces holds a set of workspace bindings that must match names
	// with those declared in the pipeline.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceBinding `json:"workspaces,omitempty"`
	// TaskRunSpecs holds a set of runtime specs
	// +optional
	// +listType=atomic
	TaskRunSpecs []PipelineTaskRunSpec `json:"taskRunSpecs,omitempty"`
	// ManagedBy indicates which controller is responsible for reconciling
	// this resource. If unset or set to "tekton.dev/pipeline", the default
	// Tekton controller will manage this resource.
	// This field is immutable.
	// +optional
	ManagedBy *string `json:"managedBy,omitempty"`
}
// TimeoutFields allows granular specification of pipeline, task, and finally timeouts
type TimeoutFields struct {
	// Pipeline sets the maximum allowed duration for execution of the entire pipeline. The sum of individual timeouts for tasks and finally must not exceed this value.
	Pipeline *metav1.Duration `json:"pipeline,omitempty"`
	// Tasks sets the maximum allowed duration of this pipeline's tasks
	Tasks *metav1.Duration `json:"tasks,omitempty"`
	// Finally sets the maximum allowed duration of this pipeline's finally
	Finally *metav1.Duration `json:"finally,omitempty"`
	// NOTE(review): a duration equal to config.NoTimeoutDuration appears to
	// disable the corresponding timeout (see TasksTimeout/FinallyTimeout) —
	// worth documenting in the API docs once confirmed.
}
// PipelineRunSpecStatus defines the pipelinerun spec status the user can provide
type PipelineRunSpecStatus string

// Valid values a user may set on PipelineRunSpec.Status to control execution.
const (
	// PipelineRunSpecStatusCancelled indicates that the user wants to cancel the task,
	// if not already cancelled or terminated
	PipelineRunSpecStatusCancelled = "Cancelled"
	// PipelineRunSpecStatusCancelledRunFinally indicates that the user wants to cancel the pipeline run,
	// if not already cancelled or terminated, but ensure finally is run normally
	PipelineRunSpecStatusCancelledRunFinally = "CancelledRunFinally"
	// PipelineRunSpecStatusStoppedRunFinally indicates that the user wants to stop the pipeline run,
	// wait for already running tasks to be completed and run finally
	// if not already cancelled or terminated
	PipelineRunSpecStatusStoppedRunFinally = "StoppedRunFinally"
	// PipelineRunSpecStatusPending indicates that the user wants to postpone starting a PipelineRun
	// until some condition is met
	PipelineRunSpecStatusPending = "PipelineRunPending"
)
// PipelineRunStatus defines the observed state of PipelineRun
type PipelineRunStatus struct {
	// Standard knative duck-typed status (conditions, observedGeneration, ...).
	duckv1.Status `json:",inline"`
	// PipelineRunStatusFields inlines the status fields.
	PipelineRunStatusFields `json:",inline"`
}
// PipelineRunReason represents a reason for the pipeline run "Succeeded" condition
type PipelineRunReason string

const (
	// PipelineRunReasonStarted is the reason set when the PipelineRun has just started
	PipelineRunReasonStarted PipelineRunReason = "Started"
	// PipelineRunReasonRunning is the reason set when the PipelineRun is running
	PipelineRunReasonRunning PipelineRunReason = "Running"
	// PipelineRunReasonSuccessful is the reason set when the PipelineRun completed successfully
	PipelineRunReasonSuccessful PipelineRunReason = "Succeeded"
	// PipelineRunReasonCompleted is the reason set when the PipelineRun completed successfully with one or more skipped Tasks
	PipelineRunReasonCompleted PipelineRunReason = "Completed"
	// PipelineRunReasonFailed is the reason set when the PipelineRun completed with a failure
	PipelineRunReasonFailed PipelineRunReason = "Failed"
	// PipelineRunReasonCancelled is the reason set when the PipelineRun cancelled by the user
	// This reason may be found with a corev1.ConditionFalse status, if the cancellation was processed successfully
	// This reason may be found with a corev1.ConditionUnknown status, if the cancellation is being processed or failed
	PipelineRunReasonCancelled PipelineRunReason = "Cancelled"
	// PipelineRunReasonPending is the reason set when the PipelineRun is in the pending state
	PipelineRunReasonPending PipelineRunReason = "PipelineRunPending"
	// PipelineRunReasonTimedOut is the reason set when the PipelineRun has timed out
	PipelineRunReasonTimedOut PipelineRunReason = "PipelineRunTimeout"
	// PipelineRunReasonStopping indicates that no new Tasks will be scheduled by the controller, and the
	// pipeline will stop once all running tasks complete their work
	PipelineRunReasonStopping PipelineRunReason = "PipelineRunStopping"
	// PipelineRunReasonCancelledRunningFinally indicates that pipeline has been gracefully cancelled
	// and no new Tasks will be scheduled by the controller, but final tasks are now running
	PipelineRunReasonCancelledRunningFinally PipelineRunReason = "CancelledRunningFinally"
	// PipelineRunReasonStoppedRunningFinally indicates that pipeline has been gracefully stopped
	// and no new Tasks will be scheduled by the controller, but final tasks are now running
	PipelineRunReasonStoppedRunningFinally PipelineRunReason = "StoppedRunningFinally"
	// PipelineRunReasonCouldntGetPipeline indicates that the reason for the failure status is that the
	// associated Pipeline couldn't be retrieved
	PipelineRunReasonCouldntGetPipeline PipelineRunReason = "CouldntGetPipeline"
	// PipelineRunReasonInvalidBindings indicates that the reason for the failure status is that the
	// PipelineResources bound in the PipelineRun didn't match those declared in the Pipeline
	PipelineRunReasonInvalidBindings PipelineRunReason = "InvalidPipelineResourceBindings"
	// PipelineRunReasonInvalidWorkspaceBinding indicates that a Pipeline expects a workspace but a
	// PipelineRun has provided an invalid binding.
	PipelineRunReasonInvalidWorkspaceBinding PipelineRunReason = "InvalidWorkspaceBindings"
	// PipelineRunReasonInvalidTaskRunSpec indicates that PipelineRun.Spec.TaskRunSpecs[].PipelineTaskName is defined with
	// a nonexistent taskName in pipelineSpec.
	PipelineRunReasonInvalidTaskRunSpec PipelineRunReason = "InvalidTaskRunSpecs"
	// PipelineRunReasonParameterTypeMismatch indicates that the reason for the failure status is that
	// parameter(s) declared in the PipelineRun do not have the same declared type as the
	// parameters(s) declared in the Pipeline that they are supposed to override.
	PipelineRunReasonParameterTypeMismatch PipelineRunReason = "ParameterTypeMismatch"
	// PipelineRunReasonObjectParameterMissKeys indicates that the object param value provided from PipelineRun spec
	// misses some keys required for the object param declared in Pipeline spec.
	PipelineRunReasonObjectParameterMissKeys PipelineRunReason = "ObjectParameterMissKeys"
	// PipelineRunReasonParamArrayIndexingInvalid indicates that the use of param array indexing is out of bound.
	PipelineRunReasonParamArrayIndexingInvalid PipelineRunReason = "ParamArrayIndexingInvalid"
	// PipelineRunReasonCouldntGetTask indicates that the reason for the failure status is that the
	// associated Pipeline's Tasks couldn't all be retrieved
	PipelineRunReasonCouldntGetTask PipelineRunReason = "CouldntGetTask"
	// PipelineRunReasonParameterMissing indicates that the reason for the failure status is that the
	// associated PipelineRun didn't provide all the required parameters
	PipelineRunReasonParameterMissing PipelineRunReason = "ParameterMissing"
	// PipelineRunReasonFailedValidation indicates that the reason for failure status is
	// that pipelinerun failed runtime validation
	PipelineRunReasonFailedValidation PipelineRunReason = "PipelineValidationFailed"
	// PipelineRunReasonCouldntGetPipelineResult indicates that the pipeline fails to retrieve the
	// referenced result. This could be due to failed TaskRuns or Runs that were supposed to produce
	// the results
	PipelineRunReasonCouldntGetPipelineResult PipelineRunReason = "CouldntGetPipelineResult"
	// PipelineRunReasonInvalidGraph indicates that the reason for the failure status is that the
	// associated Pipeline is an invalid graph (a.k.a wrong order, cycle, …)
	PipelineRunReasonInvalidGraph PipelineRunReason = "PipelineInvalidGraph"
	// PipelineRunReasonCouldntCancel indicates that a PipelineRun was cancelled but attempting to update
	// all of the running TaskRuns as cancelled failed.
	PipelineRunReasonCouldntCancel PipelineRunReason = "PipelineRunCouldntCancel"
	// PipelineRunReasonCouldntTimeOut indicates that a PipelineRun was timed out but attempting to update
	// all of the running TaskRuns as timed out failed.
	PipelineRunReasonCouldntTimeOut PipelineRunReason = "PipelineRunCouldntTimeOut"
	// PipelineRunReasonInvalidMatrixParameterTypes indicates a matrix contains invalid parameter types
	PipelineRunReasonInvalidMatrixParameterTypes PipelineRunReason = "InvalidMatrixParameterTypes"
	// PipelineRunReasonInvalidTaskResultReference indicates a task result was declared
	// but was not initialized by that task
	PipelineRunReasonInvalidTaskResultReference PipelineRunReason = "InvalidTaskResultReference"
	// PipelineRunReasonInvalidPipelineResultReference indicates a pipeline result was declared
	// by the pipeline but not initialized in the pipelineTask
	PipelineRunReasonInvalidPipelineResultReference PipelineRunReason = "InvalidPipelineResultReference"
	// PipelineRunReasonRequiredWorkspaceMarkedOptional indicates an optional workspace
	// has been passed to a Task that is expecting a non-optional workspace
	PipelineRunReasonRequiredWorkspaceMarkedOptional PipelineRunReason = "RequiredWorkspaceMarkedOptional"
	// PipelineRunReasonResolvingPipelineRef indicates that the PipelineRun is waiting for
	// its pipelineRef to be asynchronously resolved.
	PipelineRunReasonResolvingPipelineRef PipelineRunReason = "ResolvingPipelineRef"
	// PipelineRunReasonResourceVerificationFailed indicates that the pipeline fails the trusted resource verification,
	// it could be the content has changed, signature is invalid or public key is invalid
	PipelineRunReasonResourceVerificationFailed PipelineRunReason = "ResourceVerificationFailed"
	// PipelineRunReasonCreateRunFailed indicates that the pipeline fails to create the taskrun or other run resources
	PipelineRunReasonCreateRunFailed PipelineRunReason = "CreateRunFailed"
	// PipelineRunReasonCELEvaluationFailed indicates the pipeline fails the CEL evaluation
	PipelineRunReasonCELEvaluationFailed PipelineRunReason = "CELEvaluationFailed"
	// PipelineRunReasonInvalidParamValue indicates that the PipelineRun Param input value is not allowed.
	PipelineRunReasonInvalidParamValue PipelineRunReason = "InvalidParamValue"
)
// PipelineTaskOnErrorAnnotation is used to pass the failure strategy to TaskRun pods
// from the PipelineTask OnError field.
const PipelineTaskOnErrorAnnotation = "pipeline.tekton.dev/pipeline-task-on-error"
// String returns the string representation of the reason.
func (t PipelineRunReason) String() string {
	return string(t)
}
var pipelineRunCondSet = apis.NewBatchConditionSet()
// GetCondition returns the Condition matching the given type.
func (pr *PipelineRunStatus) GetCondition(t apis.ConditionType) *apis.Condition {
	manager := pipelineRunCondSet.Manage(pr)
	return manager.GetCondition(t)
}
// InitializeConditions will set all conditions in pipelineRunCondSet to unknown for the PipelineRun
// and set the started time to the current time
func (pr *PipelineRunStatus) InitializeConditions(c clock.PassiveClock) {
	justStarted := pr.StartTime.IsZero()
	if justStarted {
		pr.StartTime = &metav1.Time{Time: c.Now()}
	}
	manager := pipelineRunCondSet.Manage(pr)
	manager.InitializeConditions()
	// Ensure the started reason is set for the "Succeeded" condition
	if justStarted {
		cond := manager.GetCondition(apis.ConditionSucceeded)
		cond.Reason = PipelineRunReasonStarted.String()
		manager.SetCondition(*cond)
	}
}
// SetCondition sets the condition, unsetting previous conditions with the same
// type as necessary. A nil condition is ignored.
func (pr *PipelineRunStatus) SetCondition(newCond *apis.Condition) {
	if newCond == nil {
		return
	}
	pipelineRunCondSet.Manage(pr).SetCondition(*newCond)
}
// MarkSucceeded changes the Succeeded condition to True with the provided reason
// and message, and records the transition time as the completion time.
func (pr *PipelineRunStatus) MarkSucceeded(reason, messageFormat string, messageA ...interface{}) {
	pipelineRunCondSet.Manage(pr).MarkTrueWithReason(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	cond := pr.GetCondition(apis.ConditionSucceeded)
	pr.CompletionTime = &cond.LastTransitionTime.Inner
}
// MarkFailed changes the Succeeded condition to False with the provided reason and message.
func (pr *PipelineRunStatus) MarkFailed(reason, messageFormat string, messageA ...interface{}) {
	// NOTE(review): messageA is forwarded as a single slice argument here (not
	// spread with ...), unlike MarkFalse below — confirm LabelUserError's
	// signature expects []interface{}.
	messageFormat = pipelineErrors.LabelUserError(messageFormat, messageA)
	pipelineRunCondSet.Manage(pr).MarkFalse(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	// Record the transition time as the completion time.
	succeeded := pr.GetCondition(apis.ConditionSucceeded)
	pr.CompletionTime = &succeeded.LastTransitionTime.Inner
}
// MarkRunning changes the Succeeded condition to Unknown with the provided reason and message.
func (pr *PipelineRunStatus) MarkRunning(reason, messageFormat string, messageA ...interface{}) {
	manager := pipelineRunCondSet.Manage(pr)
	manager.MarkUnknown(apis.ConditionSucceeded, reason, messageFormat, messageA...)
}
// ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.
type ChildStatusReference struct {
	// TypeMeta records the API version and kind of the referenced child resource.
	runtime.TypeMeta `json:",inline"`
	// Name is the name of the TaskRun or Run this is referencing.
	Name string `json:"name,omitempty"`
	// DisplayName is a user-facing name of the pipelineTask that may be
	// used to populate a UI.
	DisplayName string `json:"displayName,omitempty"`
	// PipelineTaskName is the name of the PipelineTask this is referencing.
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}
// PipelineRunStatusFields holds the fields of PipelineRunStatus' status.
// This is defined separately and inlined so that other types can readily
// consume these fields via duck typing.
type PipelineRunStatusFields struct {
	// StartTime is the time the PipelineRun is actually started.
	StartTime *metav1.Time `json:"startTime,omitempty"`
	// CompletionTime is the time the PipelineRun completed.
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
	// Results are the list of results written out by the pipeline task's containers
	// +optional
	// +listType=atomic
	Results []PipelineRunResult `json:"results,omitempty"`
	// PipelineSpec contains the exact spec used to instantiate the run.
	// See Pipeline.spec (API version: tekton.dev/v1)
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`
	// SkippedTasks is the list of tasks that were skipped due to when expressions evaluating to false
	// +optional
	// +listType=atomic
	SkippedTasks []SkippedTask `json:"skippedTasks,omitempty"`
	// ChildReferences is the list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.
	// +optional
	// +listType=atomic
	ChildReferences []ChildStatusReference `json:"childReferences,omitempty"`
	// FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.
	// +optional
	FinallyStartTime *metav1.Time `json:"finallyStartTime,omitempty"`
	// Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).
	// +optional
	Provenance *Provenance `json:"provenance,omitempty"`
	// SpanContext contains tracing span context fields
	SpanContext map[string]string `json:"spanContext,omitempty"`
}
// SkippedTask is used to describe the Tasks that were skipped due to their When Expressions
// evaluating to False. This is a struct because we are looking into including more details
// about the When Expressions that caused this Task to be skipped.
type SkippedTask struct {
	// Name is the Pipeline Task name
	Name string `json:"name"`
	// Reason is the cause of the PipelineTask being skipped; one of the
	// SkippingReason constants.
	Reason SkippingReason `json:"reason"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}
// SkippingReason explains why a PipelineTask was skipped.
// The valid values are enumerated in the const block below.
type SkippingReason string
// Valid SkippingReason values, surfaced in PipelineRunStatusFields.SkippedTasks.
const (
	// WhenExpressionsSkip means the task was skipped due to at least one of its when expressions evaluating to false
	WhenExpressionsSkip SkippingReason = "When Expressions evaluated to false"
	// ParentTasksSkip means the task was skipped because its parent was skipped
	ParentTasksSkip SkippingReason = "Parent Tasks were skipped"
	// StoppingSkip means the task was skipped because the pipeline run is stopping
	StoppingSkip SkippingReason = "PipelineRun was stopping"
	// GracefullyCancelledSkip means the task was skipped because the pipeline run has been gracefully cancelled
	GracefullyCancelledSkip SkippingReason = "PipelineRun was gracefully cancelled"
	// GracefullyStoppedSkip means the task was skipped because the pipeline run has been gracefully stopped
	GracefullyStoppedSkip SkippingReason = "PipelineRun was gracefully stopped"
	// MissingResultsSkip means the task was skipped because it's missing necessary results
	MissingResultsSkip SkippingReason = "Results were missing"
	// PipelineTimedOutSkip means the task was skipped because the PipelineRun has passed its overall timeout.
	PipelineTimedOutSkip SkippingReason = "PipelineRun timeout has been reached"
	// TasksTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Tasks.
	TasksTimedOutSkip SkippingReason = "PipelineRun Tasks timeout has been reached"
	// FinallyTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Finally.
	FinallyTimedOutSkip SkippingReason = "PipelineRun Finally timeout has been reached"
	// EmptyArrayInMatrixParams means the task was skipped because Matrix parameters contain empty array.
	EmptyArrayInMatrixParams SkippingReason = "Matrix Parameters have an empty array"
	// None means the task was not skipped
	None SkippingReason = "None"
)
// PipelineRunResult used to describe the results of a pipeline
type PipelineRunResult struct {
	// Name is the result's name as declared by the Pipeline.
	Name string `json:"name"`
	// Value is the result returned from the execution of this PipelineRun.
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ResultValue `json:"value"`
}
// PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status
type PipelineRunTaskRunStatus struct {
	// PipelineTaskName is the name of the PipelineTask.
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// Status is the TaskRunStatus for the corresponding TaskRun
	// +optional
	Status *TaskRunStatus `json:"status,omitempty"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}
// PipelineRunRunStatus contains the name of the PipelineTask for this Run and the Run's Status
type PipelineRunRunStatus struct {
	// PipelineTaskName is the name of the PipelineTask.
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// Status is the RunStatus for the corresponding Run
	// +optional
	Status *runv1beta1.CustomRunStatus `json:"status,omitempty"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PipelineRunList contains a list of PipelineRun
type PipelineRunList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the collection of PipelineRuns in this list.
	Items []PipelineRun `json:"items,omitempty"`
}
// PipelineTaskRun reports the results of running a step in the Task. Each
// task has the potential to succeed or fail (based on the exit code)
// and produces logs.
type PipelineTaskRun struct {
	// Name of the task run.
	Name string `json:"name,omitempty"`
}
// PipelineTaskRunSpec can be used to configure specific
// specs for a concrete Task
type PipelineTaskRunSpec struct {
	// PipelineTaskName selects the PipelineTask these overrides apply to.
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// ServiceAccountName overrides the PipelineRun-wide service account for this task.
	ServiceAccountName string `json:"serviceAccountName,omitempty"`
	// PodTemplate overrides are merged over the PipelineRun-wide pod template
	// (see GetTaskRunSpec).
	PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"`
	// StepSpecs are per-step overrides for this task's TaskRun.
	// +listType=atomic
	StepSpecs []TaskRunStepSpec `json:"stepSpecs,omitempty"`
	// SidecarSpecs are per-sidecar overrides for this task's TaskRun.
	// +listType=atomic
	SidecarSpecs []TaskRunSidecarSpec `json:"sidecarSpecs,omitempty"`
	// Metadata to attach to the TaskRun created for this task.
	// +optional
	Metadata *PipelineTaskMetadata `json:"metadata,omitempty"`
	// Compute resources to use for this TaskRun
	ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"`
	// Duration after which the TaskRun times out. Overrides the timeout specified
	// on the Task's spec if specified. Takes lower precedence to PipelineRun's
	// `spec.timeouts.tasks`
	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
}
// GetTaskRunSpec returns the task specific spec for a given
// PipelineTask if configured, otherwise it returns the PipelineRun's default.
func (pr *PipelineRun) GetTaskRunSpec(pipelineTaskName string) PipelineTaskRunSpec {
	// Start from the PipelineRun-wide defaults.
	spec := PipelineTaskRunSpec{
		PipelineTaskName:   pipelineTaskName,
		ServiceAccountName: pr.Spec.TaskRunTemplate.ServiceAccountName,
		PodTemplate:        pr.Spec.TaskRunTemplate.PodTemplate,
	}
	for _, trs := range pr.Spec.TaskRunSpecs {
		if trs.PipelineTaskName != pipelineTaskName {
			continue
		}
		// Merge podTemplates specified in pipelineRun.spec.taskRunSpecs[].podTemplate
		// and pipelineRun.spec.podTemplate, with taskRunSpecs taking higher precedence.
		spec.PodTemplate = pod.MergePodTemplateWithDefault(trs.PodTemplate, spec.PodTemplate)
		// An empty service account means "keep the default".
		if trs.ServiceAccountName != "" {
			spec.ServiceAccountName = trs.ServiceAccountName
		}
		spec.StepSpecs = trs.StepSpecs
		spec.SidecarSpecs = trs.SidecarSpecs
		spec.Metadata = trs.Metadata
		spec.ComputeResources = trs.ComputeResources
		spec.Timeout = trs.Timeout
	}
	return spec
}
// PipelineTaskRunTemplate is used to specify run specifications for all Task in pipelinerun.
type PipelineTaskRunTemplate struct {
	// PodTemplate applied to every TaskRun created by this PipelineRun.
	// +optional
	PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"`
	// ServiceAccountName used by every TaskRun created by this PipelineRun.
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty"`
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"slices"
"strings"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
var (
	// Compile-time assertions that PipelineRun satisfies the webhook
	// validation and verb-limiting interfaces.
	_ apis.Validatable              = (*PipelineRun)(nil)
	_ resourcesemantics.VerbLimited = (*PipelineRun)(nil)
)
// SupportedVerbs returns the operations that validation should be called for.
// Deletes are intentionally not validated.
func (pr *PipelineRun) SupportedVerbs() []admissionregistrationv1.OperationType {
	verbs := []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
	return verbs
}
// Validate implements apis.Validatable for PipelineRun: it validates the
// object metadata, rejects a Pending status on an already-started run, and
// delegates the rest to the spec's own validation.
func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError {
	errs := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata")
	// Once a run has started it may no longer be marked Pending.
	if pr.IsPending() && pr.HasStarted() {
		errs = errs.Also(apis.ErrInvalidValue("PipelineRun cannot be Pending after it is started", "spec.status"))
	}
	errs = errs.Also(pr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
	return errs
}
// Validate implements apis.Validatable for PipelineRunSpec. It checks that
// exactly one of pipelineRef/pipelineSpec is provided, then validates
// parameters, timeouts, per-task run specs, the spec status, workspace
// uniqueness, and the pod template environment.
func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Validate the spec changes
	errs = errs.Also(ps.ValidateUpdate(ctx))
	// Must have exactly one of pipelineRef and pipelineSpec.
	if ps.PipelineRef == nil && ps.PipelineSpec == nil {
		errs = errs.Also(apis.ErrMissingOneOf("pipelineRef", "pipelineSpec"))
	}
	if ps.PipelineRef != nil && ps.PipelineSpec != nil {
		errs = errs.Also(apis.ErrMultipleOneOf("pipelineRef", "pipelineSpec"))
	}
	// Validate PipelineRef if it's present
	if ps.PipelineRef != nil {
		errs = errs.Also(ps.PipelineRef.Validate(ctx).ViaField("pipelineRef"))
	}
	// Validate PipelineSpec if it's present
	if ps.PipelineSpec != nil {
		// Inline specs can be disabled per resource kind via the
		// disable-inline-spec feature flag (a comma-separated list).
		if slices.Contains(strings.Split(
			config.FromContextOrDefaults(ctx).FeatureFlags.DisableInlineSpec, ","), "pipelinerun") {
			errs = errs.Also(apis.ErrDisallowedFields("pipelineSpec"))
		}
		errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec"))
	}
	// Validate PipelineRun parameters
	errs = errs.Also(ps.validatePipelineRunParameters(ctx))
	// Validate propagated parameters
	errs = errs.Also(ps.validateInlineParameters(ctx))
	if ps.Timeouts != nil {
		// tasks timeout should be a valid duration of at least 0.
		errs = errs.Also(validateTimeoutDuration("tasks", ps.Timeouts.Tasks))
		// finally timeout should be a valid duration of at least 0.
		errs = errs.Also(validateTimeoutDuration("finally", ps.Timeouts.Finally))
		// pipeline timeout should be a valid duration of at least 0.
		errs = errs.Also(validateTimeoutDuration("pipeline", ps.Timeouts.Pipeline))
		if ps.Timeouts.Pipeline != nil {
			errs = errs.Also(ps.validatePipelineTimeout(ps.Timeouts.Pipeline.Duration, "should be <= pipeline duration"))
		} else {
			// NOTE(review): DefaultTimeoutMinutes is converted to
			// time.Duration without multiplying by time.Minute, unlike
			// validateTimeout below — confirm whether this is intended.
			defaultTimeout := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes)
			errs = errs.Also(ps.validatePipelineTimeout(defaultTimeout, "should be <= default timeout duration"))
		}
	}
	// Validate individual TaskRunSpecs with timeout context
	for idx, trs := range ps.TaskRunSpecs {
		errs = errs.Also(validateTaskRunSpec(ctx, trs, ps.Timeouts).ViaIndex(idx).ViaField("taskRunSpecs"))
	}
	errs = errs.Also(validateSpecStatus(ps.Status))
	if ps.Workspaces != nil {
		// Each workspace name may be bound at most once.
		wsNames := make(map[string]int)
		for idx, ws := range ps.Workspaces {
			errs = errs.Also(ws.Validate(ctx).ViaFieldIndex("workspaces", idx))
			if prevIdx, alreadyExists := wsNames[ws.Name]; alreadyExists {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), "name").ViaFieldIndex("workspaces", idx))
			}
			wsNames[ws.Name] = idx
		}
	}
	if ps.TaskRunTemplate.PodTemplate != nil {
		errs = errs.Also(validatePodTemplateEnv(ctx, *ps.TaskRunTemplate.PodTemplate).ViaField("taskRunTemplate"))
	}
	return errs
}
// ValidateUpdate validates the update of a PipelineRunSpec against the
// baseline object stored in the admission context. Outside of an update
// (or without a usable baseline) it is a no-op. It enforces:
//   - managedBy is immutable;
//   - a completed PipelineRun accepts no spec changes (modulo defaulting);
//   - a started-but-unfinished PipelineRun accepts only status changes.
func (ps *PipelineRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
	if !apis.IsInUpdate(ctx) {
		return
	}
	oldObj, ok := apis.GetBaseline(ctx).(*PipelineRun)
	if !ok || oldObj == nil {
		return
	}
	// managedBy may neither be added, removed, nor changed.
	if (oldObj.Spec.ManagedBy == nil) != (ps.ManagedBy == nil) || (oldObj.Spec.ManagedBy != nil && *oldObj.Spec.ManagedBy != *ps.ManagedBy) {
		errs = errs.Also(apis.ErrInvalidValue("managedBy is immutable", "spec.managedBy"))
	}
	if oldObj.IsDone() {
		// try comparing without any copying first
		// this handles the common case where only finalizers changed
		if equality.Semantic.DeepEqual(&oldObj.Spec, ps) {
			return nil // Specs identical, allow update
		}
		// Specs differ, this could be due to different defaults after upgrade
		// Apply current defaults to old spec to normalize
		oldCopy := oldObj.Spec.DeepCopy()
		oldCopy.SetDefaults(ctx)
		if equality.Semantic.DeepEqual(oldCopy, ps) {
			return nil // Difference was only defaults, allow update
		}
		// Real spec changes detected, reject update
		errs = errs.Also(apis.ErrInvalidValue("Once the PipelineRun is complete, no updates are allowed", ""))
		return errs
	}
	// Handle started but not done case: copy the old spec, overwrite the
	// fields that are allowed (or already validated) to change, and require
	// everything else to be identical.
	old := oldObj.Spec.DeepCopy()
	old.Status = ps.Status
	old.ManagedBy = ps.ManagedBy // Already tested before
	if !equality.Semantic.DeepEqual(old, ps) {
		errs = errs.Also(apis.ErrInvalidValue("Once the PipelineRun has started, only status updates are allowed", ""))
	}
	return
}
// validatePipelineRunParameters validates the PipelineRun-level params:
// types/uniqueness via ValidateParameters, and a ban on task-result
// expressions appearing in parameter values.
func (ps *PipelineRunSpec) validatePipelineRunParameters(ctx context.Context) (errs *apis.FieldError) {
	if len(ps.Params) == 0 {
		return errs
	}
	errs = errs.Also(ValidateParameters(ctx, ps.Params).ViaField("params"))
	// Result references make no sense at PipelineRun scope; reject them.
	for _, param := range ps.Params {
		expressions, ok := param.GetVarSubstitutionExpressions()
		if !ok {
			continue
		}
		if !LooksLikeContainsResultRefs(expressions) {
			continue
		}
		expressions = filter(expressions, resultref.LooksLikeResultRef)
		if len(NewResultRefs(expressions)) > 0 {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("cannot use result expressions in %v as PipelineRun parameter values", expressions),
				"value").ViaFieldKey("params", param.Name))
		}
	}
	return errs
}
// validateInlineParameters validates parameters that are defined inline.
// This is crucial for propagated parameters since the parameters could
// be defined under pipelineRun and then called directly in the task steps.
// In this case, parameters cannot be validated by the underlying pipelineSpec
// or taskSpec since they may not have the parameters declared because of propagation.
func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs *apis.FieldError) {
	if ps.PipelineSpec == nil {
		return errs
	}
	// Accumulate one ParamSpec per name across the run params, the pipeline
	// spec params, and every task's params/taskSpec params.
	paramSpecForValidation := make(map[string]ParamSpec)
	for _, p := range ps.Params {
		paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation)
	}
	for _, p := range ps.PipelineSpec.Params {
		var err *apis.FieldError
		paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation)
		if err != nil {
			errs = errs.Also(err)
		}
	}
	for _, pt := range ps.PipelineSpec.Tasks {
		paramSpecForValidation = appendPipelineTaskParams(paramSpecForValidation, pt.Params)
		if pt.TaskSpec != nil && pt.TaskSpec.Params != nil {
			for _, p := range pt.TaskSpec.Params {
				var err *apis.FieldError
				paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation)
				if err != nil {
					errs = errs.Also(err)
				}
			}
		}
	}
	// Flatten the map into a slice for the validators below.
	var paramSpec []ParamSpec
	for _, v := range paramSpecForValidation {
		paramSpec = append(paramSpec, v)
	}
	// NOTE(review): ps.PipelineSpec was already nil-checked above, so this
	// guard is redundant (but harmless).
	if ps.PipelineSpec != nil && ps.PipelineSpec.Tasks != nil {
		for _, pt := range ps.PipelineSpec.Tasks {
			if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil {
				errs = errs.Also(ValidateParameterTypes(ctx, paramSpec))
				errs = errs.Also(ValidateParameterVariables(ctx, pt.TaskSpec.Steps, paramSpec))
				errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, pt.TaskSpec.Steps, paramSpec))
			}
		}
		errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.PipelineSpec.Tasks, paramSpec))
		errs = errs.Also(validatePipelineTaskParameterUsage(ps.PipelineSpec.Tasks, paramSpec))
	}
	return errs
}
// appendPipelineTaskParams merges the given pipeline-task params into the
// accumulated ParamSpec map used for propagated-parameter validation.
// Object param values are folded into an existing spec's default value and
// properties schema; params not seen before are added via
// createParamSpecFromParam. The (possibly reallocated) map is returned.
func appendPipelineTaskParams(paramSpecForValidation map[string]ParamSpec, params Params) map[string]ParamSpec {
	for _, p := range params {
		pSpec, seen := paramSpecForValidation[p.Name]
		if !seen {
			paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation)
			continue
		}
		if p.Value.ObjectVal != nil {
			// Guard against nil maps: if the existing spec was created from a
			// non-object param, its Properties / Default.ObjectVal maps may be
			// unallocated, and writing to a nil map panics.
			if pSpec.Properties == nil {
				pSpec.Properties = make(map[string]PropertySpec, len(p.Value.ObjectVal))
			}
			if pSpec.Default != nil && pSpec.Default.ObjectVal == nil {
				pSpec.Default.ObjectVal = make(map[string]string, len(p.Value.ObjectVal))
			}
			for k, v := range p.Value.ObjectVal {
				if pSpec.Default != nil {
					pSpec.Default.ObjectVal[k] = v
				}
				pSpec.Properties[k] = PropertySpec{Type: ParamTypeString}
			}
		}
		paramSpecForValidation[p.Name] = pSpec
	}
	return paramSpecForValidation
}
// validateSpecStatus checks that the user-settable spec.status is either
// empty or one of the recognized values (pending, cancelled, and the
// run-finally variants).
func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError {
	switch status {
	case "",
		PipelineRunSpecStatusPending,
		PipelineRunSpecStatusCancelled,
		PipelineRunSpecStatusCancelledRunFinally,
		PipelineRunSpecStatusStoppedRunFinally:
		return nil
	default:
		return apis.ErrInvalidValue(fmt.Sprintf("%s should be %s, %s, %s or %s", status,
			PipelineRunSpecStatusCancelled,
			PipelineRunSpecStatusCancelledRunFinally,
			PipelineRunSpecStatusStoppedRunFinally,
			PipelineRunSpecStatusPending), "status")
	}
}
// validateTimeoutDuration rejects a negative duration for the named timeouts
// sub-field (field "tasks" maps to path "timeouts.tasks", etc.). A nil
// duration is always valid.
func validateTimeoutDuration(field string, d *metav1.Duration) (errs *apis.FieldError) {
	if d == nil || d.Duration >= 0 {
		return nil
	}
	return errs.Also(apis.ErrInvalidValue(d.Duration.String()+" should be >= 0", "timeouts."+field))
}
// validatePipelineTimeout checks the tasks and finally timeouts (when set)
// against the overall pipeline timeout: each section must individually fit
// within it, and when both are set their sum must not exceed it either.
// errorMsg describes the limit (e.g. "should be <= pipeline duration").
// The tasks/finally checks previously duplicated the same logic inline; it
// is factored into validateSectionTimeout.
func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorMsg string) (errs *apis.FieldError) {
	errs = errs.Also(validateSectionTimeout(ps.Timeouts.Tasks, timeout, errorMsg, "timeouts.tasks"))
	errs = errs.Also(validateSectionTimeout(ps.Timeouts.Finally, timeout, errorMsg, "timeouts.finally"))
	// When both sections are set, their combined duration must also fit.
	if ps.Timeouts.Tasks != nil && ps.Timeouts.Finally != nil {
		if ps.Timeouts.Tasks.Duration+ps.Timeouts.Finally.Duration > timeout {
			combined := fmt.Sprintf("%s + %s %s", ps.Timeouts.Tasks.Duration.String(), ps.Timeouts.Finally.Duration.String(), errorMsg)
			errs = errs.Also(apis.ErrInvalidValue(combined, "timeouts.tasks"))
			errs = errs.Also(apis.ErrInvalidValue(combined, "timeouts.finally"))
		}
	}
	return errs
}

// validateSectionTimeout reports an error for a single section timeout d
// (tasks or finally) when the pipeline timeout is finite and either d
// exceeds it or d is set to config.NoTimeoutDuration. A nil d is valid.
func validateSectionTimeout(d *metav1.Duration, timeout time.Duration, errorMsg, fieldPath string) *apis.FieldError {
	if d == nil {
		return nil
	}
	timeoutStr := d.Duration.String()
	invalid := false
	if d.Duration > timeout && timeout != config.NoTimeoutDuration {
		invalid = true
	}
	if d.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration {
		invalid = true
		timeoutStr += " (no timeout)"
	}
	if !invalid {
		return nil
	}
	return apis.ErrInvalidValue(fmt.Sprintf("%s %s", timeoutStr, errorMsg), fieldPath)
}
// validateTaskRunSpec validates a single taskRunSpecs entry: beta-gated
// step/sidecar/compute-resource overrides, the pod template env, and the
// per-task timeout against the pipeline-level timeouts.
func validateTaskRunSpec(ctx context.Context, trs PipelineTaskRunSpec, pipelineTimeouts *TimeoutFields) (errs *apis.FieldError) {
	if trs.StepSpecs != nil {
		errs = errs.Also(
			config.ValidateEnabledAPIFields(ctx, "stepSpecs", config.BetaAPIFields).ViaField("stepSpecs"),
			validateStepSpecs(trs.StepSpecs).ViaField("stepSpecs"),
		)
	}
	if trs.SidecarSpecs != nil {
		errs = errs.Also(
			config.ValidateEnabledAPIFields(ctx, "sidecarSpecs", config.BetaAPIFields).ViaField("sidecarSpecs"),
			validateSidecarSpecs(trs.SidecarSpecs).ViaField("sidecarSpecs"),
		)
	}
	if trs.ComputeResources != nil {
		errs = errs.Also(
			config.ValidateEnabledAPIFields(ctx, "computeResources", config.BetaAPIFields).ViaField("computeResources"),
			validateTaskRunComputeResources(trs.ComputeResources, trs.StepSpecs),
		)
	}
	if trs.PodTemplate != nil {
		errs = errs.Also(validatePodTemplateEnv(ctx, *trs.PodTemplate))
	}
	return errs.Also(validateTaskRunSpecTimeout(ctx, trs.Timeout, pipelineTimeouts))
}
// validateTaskRunSpecTimeout validates a TaskRunSpec's timeout against pipeline timeouts.
// This function works in isolation and doesn't rely on previous validation steps.
// The applicable limit is resolved in precedence order:
// Timeouts.Tasks -> Timeouts.Pipeline -> the configured default.
//
// Cleanups vs the previous version: the `else` blocks after `return err`
// are flattened (early-return style), and the TaskRun timeout is validated
// exactly once instead of being re-validated on the final comparison.
func validateTaskRunSpecTimeout(ctx context.Context, timeout *metav1.Duration, pipelineTimeouts *TimeoutFields) *apis.FieldError {
	if timeout == nil {
		return nil
	}
	cfg := config.FromContextOrDefaults(ctx)
	// Validate basic timeout (negative values) once, keeping the normalized value.
	taskRunTimeout, err := validateTimeout(timeout, cfg.Defaults.DefaultTimeoutMinutes)
	if err != nil {
		return err
	}
	// Find applicable timeout limit: Tasks -> Pipeline -> Default.
	var maxTimeout *metav1.Duration
	var timeoutSource string
	switch {
	case pipelineTimeouts != nil && pipelineTimeouts.Tasks != nil:
		validated, verr := validateTimeout(pipelineTimeouts.Tasks, cfg.Defaults.DefaultTimeoutMinutes)
		if verr != nil {
			// Return error if Tasks timeout is invalid (prevents silent failures).
			return verr
		}
		maxTimeout = validated
		timeoutSource = "pipeline tasks duration"
	case pipelineTimeouts != nil && pipelineTimeouts.Pipeline != nil:
		validated, verr := validateTimeout(pipelineTimeouts.Pipeline, cfg.Defaults.DefaultTimeoutMinutes)
		if verr != nil {
			// Return error if Pipeline timeout is invalid (prevents silent failures).
			return verr
		}
		maxTimeout = validated
		timeoutSource = "pipeline duration"
	default:
		maxTimeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute}
		timeoutSource = "default pipeline duration"
	}
	// Always check against the max timeout unless it is "no timeout".
	if maxTimeout != nil && maxTimeout.Duration != config.NoTimeoutDuration {
		if taskRunTimeout.Duration > maxTimeout.Duration {
			return apis.ErrInvalidValue(
				fmt.Sprintf("%s should be <= %s %s", taskRunTimeout.Duration, timeoutSource, maxTimeout.Duration),
				"timeout")
		}
	}
	return nil
}
// validateTimeout validates a timeout field and returns the validated timeout
// with defaults applied: a nil timeout yields the default (in minutes), a
// negative timeout yields an error, anything else is returned unchanged.
func validateTimeout(timeout *metav1.Duration, defaultTimeoutMinutes int) (*metav1.Duration, *apis.FieldError) {
	switch {
	case timeout == nil:
		defaultDuration := time.Duration(defaultTimeoutMinutes) * time.Minute
		return &metav1.Duration{Duration: defaultDuration}, nil
	case timeout.Duration < 0:
		return nil, apis.ErrInvalidValue(timeout.Duration.String()+" should be >= 0", "timeout")
	default:
		return timeout, nil
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects
// (group tekton.dev, version v1).
var SchemeGroupVersion = schema.GroupVersion{Group: pipeline.GroupName, Version: "v1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
func Kind(kind string) schema.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	// schemeBuilder collects the functions that register this package's types.
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme adds Build types to the scheme.
	AddToScheme = schemeBuilder.AddToScheme
)
// addKnownTypes registers this package's API types (and their list types)
// with the given scheme under SchemeGroupVersion.
func addKnownTypes(scheme *runtime.Scheme) error {
	knownTypes := []runtime.Object{
		&Task{},
		&TaskList{},
		&Pipeline{},
		&PipelineList{},
		&TaskRun{},
		&TaskRunList{},
		&PipelineRun{},
		&PipelineRunList{},
	}
	scheme.AddKnownTypes(SchemeGroupVersion, knownTypes...)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import "context"
// SetDefaults set the default type for TaskResult: object when a properties
// schema is declared, string otherwise. Untyped object properties default to
// string as well.
func (tr *TaskResult) SetDefaults(context.Context) {
	if tr == nil {
		return
	}
	if tr.Type == "" {
		if tr.Properties == nil {
			// ResultsTypeString is the default value
			tr.Type = ResultsTypeString
		} else {
			// Set type to object if `properties` is given
			tr.Type = ResultsTypeObject
		}
	}
	// Set default type of object values to string
	for key, prop := range tr.Properties {
		if prop.Type == "" {
			tr.Properties[key] = PropertySpec{Type: ParamType(ResultsTypeString)}
		}
	}
}
// SetDefaults set the default type for StepResult: object when a properties
// schema is declared, string otherwise. Untyped object properties default to
// string as well.
func (sr *StepResult) SetDefaults(context.Context) {
	if sr == nil {
		return
	}
	if sr.Type == "" {
		if sr.Properties == nil {
			// ResultsTypeString is the default value
			sr.Type = ResultsTypeString
		} else {
			// Set type to object if `properties` is given
			sr.Type = ResultsTypeObject
		}
	}
	// Set default type of object values to string
	for key, prop := range sr.Properties {
		if prop.Type == "" {
			sr.Properties[key] = PropertySpec{Type: ParamType(ResultsTypeString)}
		}
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import "strings"
// TaskResult used to describe the results of a task
type TaskResult struct {
	// Name the given name
	Name string `json:"name"`
	// Type is the user-specified type of the result; one of "string",
	// "array", or "object" (see AllResultsTypes). Defaults to "string".
	// +optional
	Type ResultsType `json:"type,omitempty"`
	// Properties is the JSON Schema properties to support key-value pairs results.
	// +optional
	Properties map[string]PropertySpec `json:"properties,omitempty"`
	// Description is a human-readable description of the result
	// +optional
	Description string `json:"description,omitempty"`
	// Value the expression used to retrieve the value of the result from an underlying Step.
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value *ResultValue `json:"value,omitempty"`
}
// StepResult used to describe the Results of a Step.
type StepResult struct {
	// Name the given name
	Name string `json:"name"`
	// Type is the result's type. The possible types are 'string', 'array',
	// and 'object', with 'string' as the default.
	// +optional
	Type ResultsType `json:"type,omitempty"`
	// Properties is the JSON Schema properties to support key-value pairs results.
	// +optional
	Properties map[string]PropertySpec `json:"properties,omitempty"`
	// Description is a human-readable description of the result
	// +optional
	Description string `json:"description,omitempty"`
}
// TaskRunResult used to describe the results of a task
type TaskRunResult struct {
	// Name the given name
	Name string `json:"name"`
	// Type is the user-specified type of the result; one of "string",
	// "array", or "object" (see AllResultsTypes). Defaults to "string".
	// +optional
	Type ResultsType `json:"type,omitempty"`
	// Value the given value of the result
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ResultValue `json:"value"`
}
// TaskRunStepResult is a type alias of TaskRunResult; step results share the
// same shape as task results.
type TaskRunStepResult = TaskRunResult

// ResultValue is a type alias of ParamValue; result values share the same
// string/array/object representation as parameter values.
type ResultValue = ParamValue
// ResultsType indicates the type of a result;
// Used to distinguish between a single string and an array of strings.
// Note that there is ResultType used to find out whether a
// RunResult is from a task result or not, which is different from
// this ResultsType.
type ResultsType string

// Valid ResultsType values:
const (
	ResultsTypeString ResultsType = "string"
	ResultsTypeArray  ResultsType = "array"
	ResultsTypeObject ResultsType = "object"
)

// AllResultsTypes can be used for ResultsTypes validation.
var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject}
// ResultsArrayReference returns the reference of the result. e.g. results.resultname from $(results.resultname[*])
func ResultsArrayReference(a string) string {
	ref := strings.TrimPrefix(a, "$(")
	ref = strings.TrimSuffix(ref, ")")
	return strings.TrimSuffix(ref, "[*]")
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
	"context"
	"fmt"
	"regexp"
	"sort"

	"k8s.io/apimachinery/pkg/util/validation"
	"knative.dev/pkg/apis"
)
// Validate implements apis.Validatable. It checks the result name format and
// the declared result type, applies extra schema validation to object
// results, and finally validates the optional Value expression.
func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) {
	if !resultNameFormatRegex.MatchString(tr.Name) {
		return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat))
	}
	switch tr.Type {
	case ResultsTypeObject:
		errs = errs.Also(validateObjectResult(tr))
	case ResultsTypeArray, ResultsTypeString, "":
		// Array results, explicit string results, and results created before
		// the Type field existed (empty type, treated as string) need no
		// further type validation here.
	default:
		errs = errs.Also(apis.ErrInvalidValue(tr.Type, "type", "type must be string"))
	}
	return errs.Also(tr.validateValue(ctx))
}
// validateObjectResult validates an object-typed TaskResult: a Properties
// schema must be declared, and every property must be of type string.
// Invalid keys are sorted before being reported so the error message is
// deterministic (map iteration order is randomized in Go).
func validateObjectResult(tr TaskResult) (errs *apis.FieldError) {
	if ParamType(tr.Type) == ParamTypeObject && tr.Properties == nil {
		return apis.ErrMissingField(tr.Name + ".properties")
	}
	invalidKeys := []string{}
	for key, propertySpec := range tr.Properties {
		if propertySpec.Type != ParamTypeString {
			invalidKeys = append(invalidKeys, key)
		}
	}
	if len(invalidKeys) != 0 {
		sort.Strings(invalidKeys)
		return &apis.FieldError{
			Message: fmt.Sprintf("The value type specified for these keys %v is invalid, the type must be string", invalidKeys),
			Paths:   []string{tr.Name + ".properties"},
		}
	}
	return nil
}
// validateValue validates the value of the TaskResult.
// It requires that the value is of type string
// and format $(steps.<stepName>.results.<resultName>),
// where stepName is a valid DNS label and resultName matches
// resultNameFormatRegex. A nil or empty value is accepted.
func (tr TaskResult) validateValue(ctx context.Context) (errs *apis.FieldError) {
	if tr.Value == nil {
		return nil
	}
	if tr.Value.Type != ParamTypeString {
		return &apis.FieldError{
			Message: fmt.Sprintf(
				"Invalid Type. Wanted string but got: \"%v\"", tr.Value.Type),
			Paths: []string{
				tr.Name + ".type",
			},
		}
	}
	if tr.Value.StringVal != "" {
		// Parse out the step and result names; a malformed expression is an error.
		stepName, resultName, err := ExtractStepResultName(tr.Value.StringVal)
		if err != nil {
			return &apis.FieldError{
				Message: fmt.Sprintf("%v", err),
				Paths:   []string{tr.Name + ".value"},
			}
		}
		// The step name is used as a container/step identifier, so it must be
		// a valid DNS label.
		if e := validation.IsDNS1123Label(stepName); len(e) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: fmt.Sprintf("invalid extracted step name %q", stepName),
				Paths:   []string{tr.Name + ".value"},
				Details: "stepName in $(steps.<stepName>.results.<resultName>) must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
			})
		}
		// The result name must obey the same format as declared result names.
		if !resultNameFormatRegex.MatchString(resultName) {
			errs = errs.Also(&apis.FieldError{
				Message: fmt.Sprintf("invalid extracted result name %q", resultName),
				Paths:   []string{tr.Name + ".value"},
				Details: fmt.Sprintf("resultName in $(steps.<stepName>.results.<resultName>) must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat),
			})
		}
	}
	return errs
}
// Validate implements apis.Validatable. It checks that the step result name
// matches ResultNameFormat and that its declared type is one of the supported
// result types; an empty type is treated as string.
func (sr StepResult) Validate(ctx context.Context) (errs *apis.FieldError) {
	if !resultNameFormatRegex.MatchString(sr.Name) {
		return apis.ErrInvalidKeyName(sr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat))
	}
	switch sr.Type {
	case ResultsTypeObject:
		// Object results additionally need their properties checked.
		return validateObjectStepResult(sr)
	case ResultsTypeArray, ResultsTypeString, "":
		// The Type is string by default if it is empty.
		return nil
	default:
		return apis.ErrInvalidValue(sr.Type, "type", fmt.Sprintf("invalid type %s", sr.Type))
	}
}
// validateObjectStepResult validates an object step result: the properties
// section must be present, and every declared property must be of type string.
func validateObjectStepResult(sr StepResult) (errs *apis.FieldError) {
	if ParamType(sr.Type) == ParamTypeObject && sr.Properties == nil {
		return apis.ErrMissingField(sr.Name + ".properties")
	}
	// Collect every property key whose declared type is not string.
	// In case we need to support other types in the future like the nested objects #7069
	var badKeys []string
	for name, prop := range sr.Properties {
		if prop.Type != ParamTypeString {
			badKeys = append(badKeys, name)
		}
	}
	if len(badKeys) > 0 {
		return &apis.FieldError{
			Message: fmt.Sprintf("the value type specified for these keys %v is invalid, the type must be string", badKeys),
			Paths:   []string{sr.Name + ".properties"},
		}
	}
	return nil
}
// stepResultRefRegex matches a step result reference of the form
// $(steps.<stepName>.results.<resultName>) and captures the step name and the
// result name. Compiled once at package scope so callers do not pay the
// regex-compilation cost on every invocation.
var stepResultRefRegex = regexp.MustCompile(`\$\(steps\.(.*?)\.results\.(.*?)\)`)

// ExtractStepResultName extracts the step name and result name from a string matching
// format $(steps.<stepName>.results.<resultName>).
// If a match is not found, an error is returned.
func ExtractStepResultName(value string) (string, string, error) {
	rs := stepResultRefRegex.FindStringSubmatch(value)
	// A full match plus the two capture groups is expected.
	if len(rs) != 3 {
		return "", "", fmt.Errorf("Could not extract step name and result name. Expected value to look like $(steps.<stepName>.results.<resultName>) but got \"%v\"", value)
	}
	return rs[1], rs[2], nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"regexp"
"strings"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
)
// ResultRef is a type that represents a reference to a task run result
type ResultRef struct {
	// PipelineTask is the name of the PipelineTask that produces the referenced result.
	PipelineTask string `json:"pipelineTask"`
	// Result is the name of the referenced result.
	Result string `json:"result"`
	// ResultsIndex, when non-nil, selects a single element of an array result.
	ResultsIndex *int `json:"resultsIndex"`
	// Property, when non-empty, selects a single key of an object result.
	Property string `json:"property"`
}
const (
	// ResultTaskPart Constant used to define the "tasks" part of a pipeline result reference
	// retained because of backwards compatibility
	ResultTaskPart = resultref.ResultTaskPart
	// ResultFinallyPart Constant used to define the "finally" part of a pipeline result reference
	// retained because of backwards compatibility
	ResultFinallyPart = resultref.ResultFinallyPart
	// ResultResultPart Constant used to define the "results" part of a pipeline result reference
	// retained because of backwards compatibility
	ResultResultPart = resultref.ResultResultPart
	// TODO(#2462) use one regex across all substitutions
	// variableSubstitutionFormat matches format like $result.resultname, $result.resultname[int] and $result.resultname[*]
	variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)`
	// exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else
	// i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not.
	exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$`
	// ResultNameFormat Constant used to define the regex Result.Name should follow
	ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$`
)

// VariableSubstitutionRegex is a regex to find all result matching substitutions
var VariableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat)

// exactVariableSubstitutionRegex matches strings that are exactly one substitution expression.
var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat)

// resultNameFormatRegex validates result names against ResultNameFormat.
var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat)
// NewResultRefs extracts all ResultReferences from a param or a pipeline result.
// If the ResultReference can be extracted, they are returned. Expressions which are not
// results are ignored.
func NewResultRefs(expressions []string) []*ResultRef {
	var refs []*ResultRef
	for _, expr := range expressions {
		parsed, err := resultref.ParseTaskExpression(expr)
		if err != nil {
			// Not a result reference: it may be some other kind of
			// expression, so it is skipped rather than reported.
			continue
		}
		refs = append(refs, &ResultRef{
			PipelineTask: parsed.ResourceName,
			Result:       parsed.ResultName,
			ResultsIndex: parsed.ArrayIdx,
			Property:     parsed.ObjectKey,
		})
	}
	return refs
}
// LooksLikeContainsResultRefs attempts to check if param or a pipeline result looks like it contains any
// result references.
// This is useful if we want to make sure the param looks like a ResultReference before
// performing strict validation
func LooksLikeContainsResultRefs(expressions []string) bool {
	for i := range expressions {
		if !resultref.LooksLikeResultRef(expressions[i]) {
			continue
		}
		return true
	}
	return false
}
// validateString returns the stripped contents of every variable substitution
// expression found in value, or nil when value contains none.
func validateString(value string) []string {
	matches := VariableSubstitutionRegex.FindAllString(value, -1)
	if matches == nil {
		return nil
	}
	stripped := make([]string, 0, len(matches))
	for _, m := range matches {
		stripped = append(stripped, stripVarSubExpression(m))
	}
	return stripped
}
// stripVarSubExpression removes the leading "$(" and trailing ")" from a
// substitution expression, leaving the inner reference text.
func stripVarSubExpression(expression string) string {
	inner := strings.TrimPrefix(expression, "$(")
	return strings.TrimSuffix(inner, ")")
}
// ParseResultName parse the input string to extract resultName and result index.
// Array indexing:
// Input: anArrayResult[1]
// Output: anArrayResult, "1"
// Array star reference:
// Input: anArrayResult[*]
// Output: anArrayResult, "*"
// retained for backwards compatibility
func ParseResultName(resultName string) (string, string) {
	// Thin delegation to the shared resultref implementation.
	return resultref.ParseResultName(resultName)
}
// PipelineTaskResultRefs walks all the places a result reference can be used
// in a PipelineTask and returns a list of any references that are found.
func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef {
	// TODO move the whenExpression.GetVarSubstitutionExpressions() and GetVarSubstitutionExpressionsForParam(p) as well
	// separate cleanup, reference https://github.com/tektoncd/pipeline/pull/7121
	refs := []*ResultRef{}
	// References hidden inside the task's params.
	for _, param := range pt.extractAllParams() {
		exprs, _ := param.GetVarSubstitutionExpressions()
		refs = append(refs, NewResultRefs(exprs)...)
	}
	// References inside when expressions.
	for _, when := range pt.When {
		exprs, _ := when.GetVarSubstitutionExpressions()
		refs = append(refs, NewResultRefs(exprs)...)
	}
	// References in the remaining task-level fields.
	taskExprs := pt.GetVarSubstitutionExpressions()
	return append(refs, NewResultRefs(taskExprs)...)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"knative.dev/pkg/apis"
)
// Compile-time assertion that Task implements apis.Convertible.
var _ apis.Convertible = (*Task)(nil)
// ConvertTo implements apis.Convertible. Because v1 is the highest known
// version, any conversion request outside a deletion is rejected.
func (t *Task) ConvertTo(ctx context.Context, sink apis.Convertible) error {
	if !apis.IsInDelete(ctx) {
		return fmt.Errorf("v1 is the highest known version, got: %T", sink)
	}
	// Conversion during deletion is a no-op.
	return nil
}
// ConvertFrom implements apis.Convertible. Because v1 is the highest known
// version, any conversion request outside a deletion is rejected.
func (t *Task) ConvertFrom(ctx context.Context, source apis.Convertible) error {
	if !apis.IsInDelete(ctx) {
		return fmt.Errorf("v1 is the highest known version, got: %T", source)
	}
	// Conversion during deletion is a no-op.
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"knative.dev/pkg/apis"
)
// Compile-time assertion that Task implements apis.Defaultable.
var _ apis.Defaultable = (*Task)(nil)
// SetDefaults implements apis.Defaultable
func (t *Task) SetDefaults(ctx context.Context) {
	// Defaulting is delegated entirely to the spec.
	t.Spec.SetDefaults(ctx)
}
// SetDefaults set any defaults for the task spec
func (ts *TaskSpec) SetDefaults(ctx context.Context) {
	cfg := config.FromContextOrDefaults(ctx)
	// A step ref with neither a name nor a resolver gets the configured
	// default resolver.
	for i := range ts.Steps {
		ref := ts.Steps[i].Ref
		if ref != nil && ref.Name == "" && ref.Resolver == "" {
			ref.Resolver = ResolverName(cfg.Defaults.DefaultResolverType)
		}
	}
	for i := range ts.Params {
		ts.Params[i].SetDefaults(ctx)
	}
	for i := range ts.Results {
		ts.Results[i].SetDefaults(ctx)
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/kmeta"
)
// +genclient
// +genclient:noStatus
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Task represents a collection of sequential steps that are run as part of a
// Pipeline using a set of inputs and producing a set of outputs. Tasks execute
// when TaskRuns are created that provide the input parameters and resources and
// output resources the Task requires.
//
// +k8s:openapi-gen=true
// +kubebuilder:storageversion
type Task struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object metadata (name, namespace, labels, annotations, ...).
	// +optional
	metav1.ObjectMeta `json:"metadata"`
	// Spec holds the desired state of the Task from the client
	// +optional
	Spec TaskSpec `json:"spec"`
}
// Compile-time assertion that Task implements kmeta.OwnerRefable.
var _ kmeta.OwnerRefable = (*Task)(nil)
// GetGroupVersionKind implements kmeta.OwnerRefable.
// The kind is taken from pipeline.TaskControllerName under this package's SchemeGroupVersion.
func (*Task) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind(pipeline.TaskControllerName)
}
// Checksum computes the sha256 checksum of the task object.
// Prior to computing the checksum, it performs some preprocessing on the
// metadata of the object where it removes system provided annotations.
// Only the name, namespace, generateName, user-provided labels and annotations
// and the taskSpec are included for the checksum computation.
func (t *Task) Checksum() ([]byte, error) {
	// Strip system-provided annotations so they do not affect the checksum.
	sanitizedMeta := checksum.PrepareObjectMeta(t)
	// Rebuild a minimal Task containing only the fields that participate
	// in the checksum.
	pruned := Task{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "tekton.dev/v1",
			Kind:       "Task",
		},
		ObjectMeta: sanitizedMeta,
		Spec:       t.Spec,
	}
	return checksum.ComputeSha256Checksum(pruned)
}
// Volumes is a list of Kubernetes volumes available to a Task's steps.
// +listType=atomic
type Volumes []corev1.Volume
// TaskSpec defines the desired state of Task.
type TaskSpec struct {
	// Params is a list of input parameters required to run the task. Params
	// must be supplied as inputs in TaskRuns unless they declare a default
	// value.
	// +optional
	Params ParamSpecs `json:"params,omitempty"`

	// DisplayName is a user-facing name of the task that may be
	// used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`

	// Description is a user-facing description of the task that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`

	// Steps are the steps of the build; each step is run sequentially with the
	// source mounted into /workspace.
	// +listType=atomic
	Steps []Step `json:"steps,omitempty"`

	// Volumes is a collection of volumes that are available to mount into the
	// steps of the build.
	// See Pod.spec.volumes (API version: v1)
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Volumes Volumes `json:"volumes,omitempty"`

	// StepTemplate can be used as the basis for all step containers within the
	// Task, so that the steps inherit settings on the base container.
	// Omitted from serialization when nil.
	StepTemplate *StepTemplate `json:"stepTemplate,omitempty"`

	// Sidecars are run alongside the Task's step containers. They begin before
	// the steps start and end after the steps complete.
	// +listType=atomic
	Sidecars []Sidecar `json:"sidecars,omitempty"`

	// Workspaces are the volumes that this Task requires.
	// +listType=atomic
	Workspaces []WorkspaceDeclaration `json:"workspaces,omitempty"`

	// Results are values that this Task can output
	// +listType=atomic
	Results []TaskResult `json:"results,omitempty"`
}
// TaskList contains a list of Task
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TaskList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the collection of Tasks in this list.
	Items []Task `json:"items"`
}
// StepList is a list of Steps
type StepList []Step

// SidecarList is a list of Sidecars
type SidecarList []Sidecar
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"path/filepath"
"regexp"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/substitution"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
const (
	// stringAndArrayVariableNameFormat is the regex to validate if string/array variable name format follows the following rules.
	// - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)
	// - Must begin with a letter or an underscore (_)
	stringAndArrayVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$"

	// objectVariableNameFormat is the regex used to validate object name and key names format
	// The difference with the array or string name format is that object variable names shouldn't contain dots.
	objectVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9-]*$"
)

// Compile-time assertions that Task participates in validation webhooks.
var (
	_ apis.Validatable              = (*Task)(nil)
	_ resourcesemantics.VerbLimited = (*Task)(nil)
)
// SupportedVerbs returns the operations that validation should be called for
func (t *Task) SupportedVerbs() []admissionregistrationv1.OperationType {
	// Tasks are validated on create and update only.
	verbs := []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
	return verbs
}
// Regexes compiled once at package scope from the name-format constants.
var (
	stringAndArrayVariableNameFormatRegex = regexp.MustCompile(stringAndArrayVariableNameFormat)
	objectVariableNameFormatRegex         = regexp.MustCompile(objectVariableNameFormat)
)
// Validate implements apis.Validatable
func (t *Task) Validate(ctx context.Context) *apis.FieldError {
	// Metadata is validated first, then the spec within a spec context.
	errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata")
	errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
	// When a Task is created directly, instead of declared inline in a TaskRun or PipelineRun,
	// we do not support propagated parameters. Validate that all params it uses are declared.
	errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params).ViaField("spec"))
	return errs
}
// Validate implements apis.Validatable
func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	// A Task must declare at least one step.
	if len(ts.Steps) == 0 {
		errs = errs.Also(apis.ErrMissingField("steps"))
	}
	errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes"))
	errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces"))
	errs = errs.Also(validateWorkspaceUsages(ctx, ts))
	// Steps are validated after merging in the stepTemplate so template-supplied
	// fields are taken into account; a merge failure is reported on stepTemplate.
	mergedSteps, err := MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps)
	if err != nil {
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("error merging step template and steps: %s", err),
			Paths:   []string{"stepTemplate"},
			Details: err.Error(),
		})
	}
	errs = errs.Also(StepList(mergedSteps).Validate(ctx).ViaField("steps"))
	errs = errs.Also(SidecarList(ts.Sidecars).Validate(ctx).ViaField("sidecars"))
	errs = errs.Also(ValidateParameterTypes(ctx, ts.Params).ViaField("params"))
	errs = errs.Also(ValidateParameterVariables(ctx, ts.Steps, ts.Params))
	errs = errs.Also(validateTaskContextVariables(ctx, ts.Steps))
	errs = errs.Also(validateTaskResultsVariables(ctx, ts.Steps, ts.Results))
	errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results"))
	return errs
}
// ValidateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task.
func ValidateUsageOfDeclaredParameters(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError {
	var errs *apis.FieldError
	_, _, objectParams := params.SortByType()
	// Every $(params.<name>) reference in a step must name a declared param.
	allParameterNames := sets.NewString(params.GetNames()...)
	errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames))
	// Object params get extra checks for key-level references and properties.
	errs = errs.Also(validateObjectUsage(ctx, steps, objectParams))
	errs = errs.Also(ValidateObjectParamsHaveProperties(ctx, params))
	return errs
}
// ValidateObjectParamsHaveProperties returns an error if any declared object params are missing properties
func ValidateObjectParamsHaveProperties(ctx context.Context, params ParamSpecs) *apis.FieldError {
	var errs *apis.FieldError
	for _, param := range params {
		// Only object-typed params with a nil properties section are invalid.
		if param.Type != ParamTypeObject || param.Properties != nil {
			continue
		}
		errs = errs.Also(apis.ErrMissingField(param.Name + ".properties"))
	}
	return errs
}
// validateResults validates each declared TaskResult, attaching its index
// to any reported error.
func validateResults(ctx context.Context, results []TaskResult) (errs *apis.FieldError) {
	for i := range results {
		errs = errs.Also(results[i].Validate(ctx).ViaIndex(i))
	}
	return errs
}
// validateDeclaredWorkspaces will validate that the declared workspaces do not have
// a mount path which conflicts with any other declared workspaces, with the explicitly
// declared volume mounts, or with the stepTemplate. The names must also be unique.
func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *StepTemplate) (errs *apis.FieldError) {
	// Gather every mount path already claimed by step / stepTemplate volume mounts.
	mountPaths := sets.NewString()
	for _, step := range steps {
		for _, vm := range step.VolumeMounts {
			mountPaths.Insert(filepath.Clean(vm.MountPath))
		}
	}
	if stepTemplate != nil {
		for _, vm := range stepTemplate.VolumeMounts {
			mountPaths.Insert(filepath.Clean(vm.MountPath))
		}
	}

	wsNames := sets.NewString()
	for idx, w := range workspaces {
		// Workspace names must be unique
		if wsNames.Has(w.Name) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace name %q must be unique", w.Name), "name").ViaIndex(idx))
		} else {
			wsNames.Insert(w.Name)
		}
		// Workspaces must not try to use mount paths that are already used
		mountPath := filepath.Clean(w.GetMountPath())
		// Use the sets.String API consistently rather than raw map
		// indexing/assignment for membership tests and insertion.
		if mountPaths.Has(mountPath) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace mount path %q must be unique", mountPath), "mountpath").ViaIndex(idx))
		}
		mountPaths.Insert(mountPath)
	}
	return errs
}
// validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps
// refer to workspaces that are defined in the Task.
//
// This is a beta feature and will fail validation if it's used by a step
// or sidecar when the enable-api-fields feature gate is anything but "beta".
func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) {
	workspaces := ts.Workspaces
	steps := ts.Steps
	sidecars := ts.Sidecars

	// Collect the names of all declared workspaces for lookup below.
	wsNames := sets.NewString()
	for _, w := range workspaces {
		wsNames.Insert(w.Name)
	}

	for stepIdx, step := range steps {
		// Using workspaces on a step at all requires the beta API fields gate.
		if len(step.Workspaces) != 0 {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "step workspaces", config.BetaAPIFields).ViaIndex(stepIdx).ViaField("steps"))
		}
		for workspaceIdx, w := range step.Workspaces {
			if !wsNames.Has(w.Name) {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(stepIdx).ViaField("steps"))
			}
		}
	}

	for sidecarIdx, sidecar := range sidecars {
		// Same beta gate applies to sidecar workspaces.
		if len(sidecar.Workspaces) != 0 {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.BetaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars"))
		}
		for workspaceIdx, w := range sidecar.Workspaces {
			if !wsNames.Has(w.Name) {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(sidecarIdx).ViaField("sidecars"))
			}
		}
	}
	return errs
}
// ValidateVolumes validates a slice of volumes to make sure there are no duplicate names
func ValidateVolumes(volumes []corev1.Volume) (errs *apis.FieldError) {
	// Task must not have duplicate volume names.
	seen := sets.NewString()
	for i, vol := range volumes {
		if seen.Has(vol.Name) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("multiple volumes with same name %q", vol.Name), "name").ViaIndex(i))
			continue
		}
		seen.Insert(vol.Name)
	}
	return errs
}
// Validate implements apis.Validatable
func (l StepList) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Task must not have duplicate step names.
	names := sets.NewString()
	for idx, s := range l {
		// names cannot be duplicated - checking that Step names are unique
		// (unnamed steps are exempt from the uniqueness check).
		if s.Name != "" {
			if names.Has(s.Name) {
				errs = errs.Also(apis.ErrMultipleOneOf("name").ViaIndex(idx))
			}
			names.Insert(s.Name)
		}
		errs = errs.Also(s.Validate(ctx).ViaIndex(idx))
		// Steps declaring their own results get both declaration and
		// script-reference validation.
		if s.Results != nil {
			errs = errs.Also(ValidateStepResultsVariables(ctx, s.Results, s.Script).ViaIndex(idx))
			errs = errs.Also(ValidateStepResults(ctx, s.Results).ViaIndex(idx).ViaField("results"))
		}
		// When expressions on steps are validated separately.
		if len(s.When) > 0 {
			errs = errs.Also(s.When.validate(ctx).ViaIndex(idx))
		}
	}
	return errs
}
// ValidateStepResults validates that all of the declared StepResults are valid.
func ValidateStepResults(ctx context.Context, results []StepResult) (errs *apis.FieldError) {
	for i := range results {
		errs = errs.Also(results[i].Validate(ctx).ViaIndex(i))
	}
	return errs
}
// ValidateStepResultsVariables validates if the StepResults referenced in step script are defined in step's results.
func ValidateStepResultsVariables(ctx context.Context, results []StepResult, script string) (errs *apis.FieldError) {
	declared := sets.NewString()
	for i := range results {
		declared.Insert(results[i].Name)
	}
	// The script is checked under both reference prefixes.
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(script, "step.results", declared).ViaField("script"))
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(script, "results", declared).ViaField("script"))
	return errs
}
// Validate validates every Sidecar in the list and accumulates the errors.
func (l SidecarList) Validate(ctx context.Context) (errs *apis.FieldError) {
	for i := range l {
		errs = errs.Also(l[i].Validate(ctx))
	}
	return errs
}
// ValidateParameterTypes validates all the types within a slice of ParamSpecs
func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) {
	for i := range params {
		errs = errs.Also(params[i].ValidateType(ctx))
	}
	return errs
}
// ValidateType checks that the type of a ParamSpec is allowed and its default value matches that type
func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError {
	// The declared type must be one of the supported param types.
	isAllowed := false
	for _, allowed := range AllParamTypes {
		if p.Type == allowed {
			isAllowed = true
			break
		}
	}
	if !isAllowed {
		return apis.ErrInvalidValue(p.Type, p.Name+".type")
	}
	// If a default value is provided, ensure its type matches param's declared type.
	if p.Default != nil && p.Default.Type != p.Type {
		return &apis.FieldError{
			Message: fmt.Sprintf(
				"\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type),
			Paths: []string{
				p.Name + ".type",
				p.Name + ".default.type",
			},
		}
	}
	// Check object type and its PropertySpec type
	return p.ValidateObjectType(ctx)
}
// ValidateObjectType checks that object type parameter does not miss the
// definition of `properties` section and the type of a PropertySpec is allowed.
// (Currently, only string is allowed)
func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError {
	// Collect every property key declared with a non-string type.
	var badKeys []string
	for key, prop := range p.Properties {
		if prop.Type != ParamTypeString {
			badKeys = append(badKeys, key)
		}
	}
	if len(badKeys) == 0 {
		return nil
	}
	return &apis.FieldError{
		Message: fmt.Sprintf("The value type specified for these keys %v is invalid", badKeys),
		Paths:   []string{p.Name + ".properties"},
	}
}
// ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps
func ValidateParameterVariables(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError {
	var errs *apis.FieldError
	errs = errs.Also(params.ValidateNoDuplicateNames())
	errs = errs.Also(params.validateParamEnums(ctx).ViaField("params"))
	stringParams, arrayParams, objectParams := params.SortByType()
	stringParameterNames := sets.NewString(stringParams.GetNames()...)
	arrayParameterNames := sets.NewString(arrayParams.GetNames()...)
	// Name-format rules apply to string and array params together, plus object params.
	errs = errs.Also(ValidateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams))
	// Array params are restricted in where they may be referenced.
	return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames))
}
// validateTaskContextVariables returns an error if any Steps reference context variables that don't exist.
func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError {
	// Known fields under $(context.taskRun.*).
	taskRunContextNames := sets.NewString("name", "namespace", "uid")
	// Known fields under $(context.task.*).
	taskContextNames := sets.NewString("name", "retry-count")
	errs := validateVariables(ctx, steps, "context\\.taskRun", taskRunContextNames)
	return errs.Also(validateVariables(ctx, steps, "context\\.task", taskContextNames))
}
// validateTaskResultsVariables validates if the results referenced in step script are defined in task results
func validateTaskResultsVariables(ctx context.Context, steps []Step, results []TaskResult) (errs *apis.FieldError) {
	declared := sets.NewString()
	for i := range results {
		declared.Insert(results[i].Name)
	}
	for i := range steps {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(steps[i].Script, "results", declared).ViaField("script").ViaFieldIndex("steps", i))
	}
	return errs
}
// validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object
func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) (errs *apis.FieldError) {
	objectNames := sets.NewString()
	for _, param := range params {
		objectNames.Insert(param.Name)
		// Gather the declared keys of this object param so references
		// like $(params.<obj>.<key>) can be checked against them.
		keys := sets.NewString()
		for key := range param.Properties {
			keys.Insert(key)
		}
		errs = errs.Also(validateVariables(ctx, steps, "params\\."+param.Name, keys))
	}
	// Whole-object references are only permitted in certain fields.
	return errs.Also(validateObjectUsageAsWhole(steps, "params", objectNames))
}
// validateObjectUsageAsWhole returns an error if the Steps contain references to the entire input object params in fields where these references are prohibited
func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) {
	for idx, step := range steps {
		// Attach the steps[idx] path prefix to this step's errors only, before
		// accumulating: applying ViaFieldIndex to the accumulated errs would
		// re-prefix errors already reported for earlier steps. This matches
		// the pattern used in validateVariables.
		errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars).ViaFieldIndex("steps", idx))
	}
	return errs
}
// validateStepObjectUsageAsWhole returns an error if the Step contains references to the entire input object params in fields where these references are prohibited
func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError {
	// Each scalar field of the step is checked for whole-object references.
	errs := substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Name, prefix, vars).ViaField("name")
	errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Image, prefix, vars).ViaField("image"))
	errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.WorkingDir, prefix, vars).ViaField("workingDir"))
	errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Script, prefix, vars).ViaField("script"))
	// Repeated fields are checked element by element with an indexed path.
	for i, cmd := range step.Command {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(cmd, prefix, vars).ViaFieldIndex("command", i))
	}
	for i, arg := range step.Args {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(arg, prefix, vars).ViaFieldIndex("args", i))
	}
	// Env entries are keyed by their variable name in the error path.
	for _, env := range step.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name))
	}
	for i, v := range step.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i))
	}
	return errs
}
// validateArrayUsage returns an error if the Steps contain references to the input array params in fields where these references are prohibited
func validateArrayUsage(steps []Step, prefix string, arrayParamNames sets.String) (errs *apis.FieldError) {
	for idx, step := range steps {
		// Attach the steps[idx] path prefix to this step's errors only, before
		// accumulating: applying ViaFieldIndex to the accumulated errs would
		// re-prefix errors already reported for earlier steps. This matches
		// the pattern used in validateVariables.
		errs = errs.Also(validateStepArrayUsage(step, prefix, arrayParamNames).ViaFieldIndex("steps", idx))
	}
	return errs
}
// validateStepArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited
func validateStepArrayUsage(step Step, prefix string, arrayParamNames sets.String) *apis.FieldError {
	// Scalar string fields must not reference array params at all.
	errs := substitution.ValidateNoReferencesToProhibitedVariables(step.Name, prefix, arrayParamNames).ViaField("name")
	errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Image, prefix, arrayParamNames).ViaField("image"))
	errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.WorkingDir, prefix, arrayParamNames).ViaField("workingDir"))
	errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Script, prefix, arrayParamNames).ViaField("script"))
	// Command and args MAY reference array params, but only as an isolated
	// element (the whole element is the reference), hence the different check.
	for i, cmd := range step.Command {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(cmd, prefix, arrayParamNames).ViaFieldIndex("command", i))
	}
	for i, arg := range step.Args {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(arg, prefix, arrayParamNames).ViaFieldIndex("args", i))
	}
	for _, env := range step.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(env.Value, prefix, arrayParamNames).ViaFieldKey("env", env.Name))
	}
	for i, v := range step.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.Name, prefix, arrayParamNames).ViaField("name").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.MountPath, prefix, arrayParamNames).ViaField("mountPath").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.SubPath, prefix, arrayParamNames).ViaField("subPath").ViaFieldIndex("volumeMount", i))
	}
	return errs
}
// validateVariables returns an error if the Steps contain references to any unknown variables
func validateVariables(ctx context.Context, steps []Step, prefix string, vars sets.String) *apis.FieldError {
	var errs *apis.FieldError
	for i := range steps {
		errs = errs.Also(validateStepVariables(ctx, steps[i], prefix, vars).ViaFieldIndex("steps", i))
	}
	return errs
}
// ValidateNameFormat validates that the name format of all param types follows the rules
func ValidateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSpec) (errs *apis.FieldError) {
	// String/array parameter names.
	// Iterate the sorted List() rather than raw map keys so the error
	// message is deterministic for unit tests.
	badStringAndArray := []string{}
	for _, name := range stringAndArrayParams.List() {
		if !stringAndArrayVariableNameFormatRegex.MatchString(name) {
			badStringAndArray = append(badStringAndArray, name)
		}
	}
	if len(badStringAndArray) > 0 {
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", badStringAndArray),
			Paths:   []string{"params"},
			Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)",
		})
	}

	// Object parameter names and their property key names.
	badObjectNames := map[string][]string{}
	for _, obj := range objectParams {
		// An invalid object name gets an entry even when all its keys are valid.
		if !objectVariableNameFormatRegex.MatchString(obj.Name) {
			badObjectNames[obj.Name] = []string{}
		}
		for key := range obj.Properties {
			if !objectVariableNameFormatRegex.MatchString(key) {
				badObjectNames[obj.Name] = append(badObjectNames[obj.Name], key)
			}
		}
	}
	if len(badObjectNames) > 0 {
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("Object param name and key name format is invalid: %s", badObjectNames),
			Paths:   []string{"params"},
			Details: "Object Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)",
		})
	}
	return errs
}
// validateStepVariables returns an error if the Step contains references to any unknown variables
func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError {
	errs := substitution.ValidateNoReferencesToUnknownVariables(step.Name, prefix, vars).ViaField("name")
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Image, prefix, vars).ViaField("image"))
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.WorkingDir, prefix, vars).ViaField("workingDir"))
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, prefix, vars).ViaField("script"))
	for i, cmd := range step.Command {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i))
	}
	for i, arg := range step.Args {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(arg, prefix, vars).ViaFieldIndex("args", i))
	}
	for _, env := range step.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name))
	}
	for i, v := range step.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i))
		// Use lowerCamelCase field paths ("mountPath"/"subPath") so reported
		// paths match the JSON field names and the sibling validators
		// (validateStepUsage/validateStepArrayUsage) in this file; the
		// original used Go field casing ("MountPath"/"SubPath") here only.
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i))
	}
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(string(step.OnError), prefix, vars).ViaField("onError"))
	return errs
}
// GetIndexingReferencesToArrayParams returns all strings referencing indices of TaskRun array parameters
// from parameters, workspaces, and when expressions defined in the Task.
// For example, if a Task has a parameter with a value "$(params.array-param-name[1])",
// this would be one of the strings returned.
func (ts *TaskSpec) GetIndexingReferencesToArrayParams() sets.String {
	// Gather every string that can carry a param reference.
	var refs []string
	refs = append(refs, extractParamRefsFromSteps(ts.Steps)...)
	refs = append(refs, extractParamRefsFromStepTemplate(ts.StepTemplate)...)
	refs = append(refs, extractParamRefsFromVolumes(ts.Volumes)...)
	for _, ws := range ts.Workspaces {
		refs = append(refs, ws.MountPath)
	}
	refs = append(refs, extractParamRefsFromSidecars(ts.Sidecars)...)
	// Keep only array-indexing references, e.g. "$(params.array-params[1])".
	result := sets.NewString()
	for _, ref := range refs {
		result.Insert(extractArrayIndexingParamRefs(ref)...)
	}
	return result
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// TaskRef can be used to refer to a specific instance of a task.
// Exactly how the reference resolves depends on which fields are set:
// a plain Name refers to an in-cluster Task, Kind+APIVersion mark a
// Custom Task (see IsCustomTask), and ResolverRef fetches remotely.
type TaskRef struct {
	// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
	Name string `json:"name,omitempty"`
	// TaskKind indicates the Kind of the Task:
	// 1. Namespaced Task when Kind is set to "Task". If Kind is "", it defaults to "Task".
	// 2. Custom Task when Kind is non-empty and APIVersion is non-empty
	Kind TaskKind `json:"kind,omitempty"`
	// API version of the referent
	// Note: A Task with non-empty APIVersion and Kind is considered a Custom Task
	// +optional
	APIVersion string `json:"apiVersion,omitempty"`
	// ResolverRef allows referencing a Task in a remote location
	// like a git repo. This field is only supported when the alpha
	// feature gate is enabled.
	// +optional
	ResolverRef `json:",omitempty"`
}

// TaskKind defines the type of Task used by the pipeline.
type TaskKind string

const (
	// NamespacedTaskKind indicates that the task type has a namespaced scope.
	// This is the value Kind is defaulted to when empty (see TaskRunSpec.SetDefaults).
	NamespacedTaskKind TaskKind = "Task"
)
// IsCustomTask checks whether the reference is to a Custom Task
func (tr *TaskRef) IsCustomTask() bool {
	// Note that if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind` is set to `"Task"`,
	// the reference will be considered a Custom Task - https://github.com/tektoncd/pipeline/issues/6457
	if tr == nil {
		return false
	}
	return tr.APIVersion != "" && tr.Kind != ""
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"knative.dev/pkg/apis"
)
// Validate ensures that a supplied TaskRef field is populated
// correctly. No errors are returned for a nil TaskRef.
func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) {
	if ref != nil {
		errs = validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
	}
	return errs
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"knative.dev/pkg/apis"
)
// Compile-time assertion that TaskRun satisfies apis.Convertible.
var _ apis.Convertible = (*TaskRun)(nil)

// ConvertTo implements apis.Convertible
func (tr *TaskRun) ConvertTo(ctx context.Context, sink apis.Convertible) error {
	// No conversion is needed while a deletion is in flight.
	if apis.IsInDelete(ctx) {
		return nil
	}
	// v1 is the highest known version, so any requested sink is unsupported.
	return fmt.Errorf("v1 is the highest known version, got: %T", sink)
}
// ConvertFrom implements apis.Convertible
func (tr *TaskRun) ConvertFrom(ctx context.Context, source apis.Convertible) error {
	// No conversion is needed while a deletion is in flight.
	if apis.IsInDelete(ctx) {
		return nil
	}
	// v1 is the highest known version, so any provided source is unsupported.
	return fmt.Errorf("v1 is the highest known version, got: %T", source)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmap"
)
// Compile-time assertion that TaskRun satisfies apis.Defaultable.
var _ apis.Defaultable = (*TaskRun)(nil)

// ManagedByLabelKey is the label key used to mark what is managing this resource
const ManagedByLabelKey = "app.kubernetes.io/managed-by"
// SetDefaults implements apis.Defaultable
func (tr *TaskRun) SetDefaults(ctx context.Context) {
	ctx = apis.WithinParent(ctx, tr.ObjectMeta)
	tr.Spec.SetDefaults(ctx)

	// On create, silently drop any annotations whose keys match the
	// Tekton-reserved pattern.
	if apis.IsInCreate(ctx) {
		tr.ObjectMeta.Annotations = kmap.Filter(tr.ObjectMeta.Annotations, func(key string) bool {
			return filterReservedAnnotationRegexp.MatchString(key)
		})
	}

	// Apply the configured default managed-by label when none is present.
	cfg := config.FromContextOrDefaults(ctx)
	if tr.ObjectMeta.Labels == nil {
		tr.ObjectMeta.Labels = map[string]string{}
	}
	if _, ok := tr.ObjectMeta.Labels[ManagedByLabelKey]; !ok {
		tr.ObjectMeta.Labels[ManagedByLabelKey] = cfg.Defaults.DefaultManagedByLabelValue
	}
}
// SetDefaults implements apis.Defaultable
func (trs *TaskRunSpec) SetDefaults(ctx context.Context) {
	cfg := config.FromContextOrDefaults(ctx)

	if ref := trs.TaskRef; ref != nil {
		// A ref with neither a name nor a resolver falls back to the
		// cluster-configured default resolver.
		if ref.Name == "" && ref.Resolver == "" {
			ref.Resolver = ResolverName(cfg.Defaults.DefaultResolverType)
		}
		// Only in-cluster (non-resolver) references get the default kind.
		if ref.Kind == "" && ref.Resolver == "" {
			ref.Kind = NamespacedTaskKind
		}
	}

	if trs.Timeout == nil {
		trs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute}
	}

	if sa := cfg.Defaults.DefaultServiceAccount; trs.ServiceAccountName == "" && sa != "" {
		trs.ServiceAccountName = sa
	}

	trs.PodTemplate = pod.MergePodTemplateWithDefault(trs.PodTemplate, cfg.Defaults.DefaultPodTemplate)

	// If this taskrun has an embedded task, apply the usual task defaults
	if trs.TaskSpec != nil {
		trs.TaskSpec.SetDefaults(ctx)
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
apisconfig "github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// TaskRunSpec defines the desired state of TaskRun
type TaskRunSpec struct {
	// Debug configures optional breakpoints for this run.
	// +optional
	Debug *TaskRunDebug `json:"debug,omitempty"`
	// Params are the input parameter values supplied to the run.
	// +optional
	Params Params `json:"params,omitempty"`
	// ServiceAccountName is defaulted from the cluster configuration when
	// empty (see TaskRunSpec.SetDefaults).
	// +optional
	ServiceAccountName string `json:"serviceAccountName"`
	// no more than one of the TaskRef and TaskSpec may be specified.
	// +optional
	TaskRef *TaskRef `json:"taskRef,omitempty"`
	// Specifying TaskSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Task.spec (API version: tekton.dev/v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	TaskSpec *TaskSpec `json:"taskSpec,omitempty"`
	// Used for cancelling a TaskRun (and maybe more later on)
	// +optional
	Status TaskRunSpecStatus `json:"status,omitempty"`
	// Status message for cancellation.
	// +optional
	StatusMessage TaskRunSpecStatusMessage `json:"statusMessage,omitempty"`
	// Retries represents how many times this TaskRun should be retried in the event of task failure.
	// +optional
	Retries int `json:"retries,omitempty"`
	// Time after which one retry attempt times out. Defaults to 1 hour.
	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
	// PodTemplate holds pod specific configuration
	PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"`
	// Workspaces is a list of WorkspaceBindings from volumes to workspaces.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceBinding `json:"workspaces,omitempty"`
	// Specs to apply to Steps in this TaskRun.
	// If a field is specified in both a Step and a StepSpec,
	// the value from the StepSpec will be used.
	// This field is only supported when the alpha feature gate is enabled.
	// +optional
	// +listType=atomic
	StepSpecs []TaskRunStepSpec `json:"stepSpecs,omitempty"`
	// Specs to apply to Sidecars in this TaskRun.
	// If a field is specified in both a Sidecar and a SidecarSpec,
	// the value from the SidecarSpec will be used.
	// This field is only supported when the alpha feature gate is enabled.
	// +optional
	// +listType=atomic
	SidecarSpecs []TaskRunSidecarSpec `json:"sidecarSpecs,omitempty"`
	// Compute resources to use for this TaskRun
	ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"`
	// ManagedBy indicates which controller is responsible for reconciling
	// this resource. If unset or set to "tekton.dev/pipeline", the default
	// Tekton controller will manage this resource.
	// This field is immutable.
	// +optional
	ManagedBy *string `json:"managedBy,omitempty"`
}
// TaskRunSpecStatus defines the TaskRun spec status the user can provide
type TaskRunSpecStatus string

const (
	// TaskRunSpecStatusCancelled indicates that the user wants to cancel the task,
	// if not already cancelled or terminated. Compared against
	// TaskRunSpec.Status by TaskRun.IsCancelled.
	// NOTE(review): this constant is untyped (string), not TaskRunSpecStatus —
	// confirm whether callers rely on the implicit string typing.
	TaskRunSpecStatusCancelled = "TaskRunCancelled"
)

// TaskRunSpecStatusMessage defines human readable status messages for the TaskRun.
type TaskRunSpecStatusMessage string

const (
	// TaskRunCancelledByPipelineMsg indicates that the PipelineRun of which this
	// TaskRun was a part of has been cancelled.
	TaskRunCancelledByPipelineMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has been cancelled."
	// TaskRunCancelledByPipelineTimeoutMsg indicates that the TaskRun was cancelled because the PipelineRun running it timed out.
	TaskRunCancelledByPipelineTimeoutMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has timed out."
)

const (
	// EnabledOnFailureBreakpoint is the value for TaskRunDebug.Breakpoints.OnFailure that means the breakpoint onFailure is enabled
	EnabledOnFailureBreakpoint = "enabled"
)
// TaskRunDebug defines the breakpoint config for a particular TaskRun
type TaskRunDebug struct {
	// Breakpoints, when set, describe where the run should pause.
	// +optional
	Breakpoints *TaskBreakpoints `json:"breakpoints,omitempty"`
}

// TaskBreakpoints defines the breakpoint config for a particular Task
type TaskBreakpoints struct {
	// if enabled, pause TaskRun on failure of a step
	// failed step will not exit
	// The only value recognized by NeedsDebugOnFailure is
	// EnabledOnFailureBreakpoint ("enabled").
	// +optional
	OnFailure string `json:"onFailure,omitempty"`
	// BeforeSteps lists step names to pause before executing.
	// +optional
	// +listType=atomic
	BeforeSteps []string `json:"beforeSteps,omitempty"`
}
// NeedsDebugOnFailure return true if the TaskRun is configured to debug on failure
func (trd *TaskRunDebug) NeedsDebugOnFailure() bool {
	// TaskRunSpec.Debug is an optional pointer field, so be nil-safe on the
	// receiver as well as on the breakpoint config (the original panicked on
	// a nil receiver).
	if trd == nil || trd.Breakpoints == nil {
		return false
	}
	return trd.Breakpoints.OnFailure == EnabledOnFailureBreakpoint
}
// NeedsDebugBeforeStep return true if the step is configured to debug before execution
func (trd *TaskRunDebug) NeedsDebugBeforeStep(stepName string) bool {
	// Nil-safe on the receiver: TaskRunSpec.Debug is an optional pointer
	// field (the original panicked on a nil receiver).
	if trd == nil || trd.Breakpoints == nil {
		return false
	}
	beforeStepSets := sets.NewString(trd.Breakpoints.BeforeSteps...)
	return beforeStepSets.Has(stepName)
}
// StepNeedsDebug return true if the step is configured to debug
func (trd *TaskRunDebug) StepNeedsDebug(stepName string) bool {
	// A step is debugged either via the global on-failure breakpoint or a
	// per-step "before" breakpoint.
	if trd.NeedsDebugOnFailure() {
		return true
	}
	return trd.NeedsDebugBeforeStep(stepName)
}
// NeedsDebug return true if defined onfailure or have any before, after steps
func (trd *TaskRunDebug) NeedsDebug() bool {
	if trd.NeedsDebugOnFailure() {
		return true
	}
	return trd.HaveBeforeSteps()
}
// HaveBeforeSteps return true if have any before steps
func (trd *TaskRunDebug) HaveBeforeSteps() bool {
	if trd.Breakpoints == nil {
		return false
	}
	return len(trd.Breakpoints.BeforeSteps) > 0
}
// TaskRunInputs holds the input values that this task was invoked with.
type TaskRunInputs struct {
	// Params are the input parameter values.
	// +optional
	// +listType=atomic
	Params Params `json:"params,omitempty"`
}

// taskRunCondSet is the batch condition set through which every
// read/write of the "Succeeded" condition on TaskRunStatus goes.
var taskRunCondSet = apis.NewBatchConditionSet()

// TaskRunStatus defines the observed state of TaskRun
type TaskRunStatus struct {
	duckv1.Status `json:",inline"`

	// TaskRunStatusFields inlines the status fields.
	TaskRunStatusFields `json:",inline"`
}
// TaskRunReason is an enum used to store all TaskRun reason for
// the Succeeded condition that are controlled by the TaskRun itself. Failure
// reasons that emerge from underlying resources are not included here
type TaskRunReason string

const (
	// TaskRunReasonStarted is the reason set when the TaskRun has just started
	TaskRunReasonStarted TaskRunReason = "Started"
	// TaskRunReasonRunning is the reason set when the TaskRun is running
	TaskRunReasonRunning TaskRunReason = "Running"
	// TaskRunReasonSuccessful is the reason set when the TaskRun completed successfully
	TaskRunReasonSuccessful TaskRunReason = "Succeeded"
	// TaskRunReasonFailed is the reason set when the TaskRun completed with a failure
	TaskRunReasonFailed TaskRunReason = "Failed"
	// TaskRunReasonToBeRetried is the reason set when the last TaskRun execution failed, and will be retried
	TaskRunReasonToBeRetried TaskRunReason = "ToBeRetried"
	// TaskRunReasonCancelled is the reason set when the TaskRun is cancelled by the user
	TaskRunReasonCancelled TaskRunReason = "TaskRunCancelled"
	// TaskRunReasonTimedOut is the reason set when one TaskRun execution has timed out
	TaskRunReasonTimedOut TaskRunReason = "TaskRunTimeout"
	// TaskRunReasonResolvingTaskRef indicates that the TaskRun is waiting for
	// its taskRef to be asynchronously resolved.
	// NOTE(review): untyped (string) unlike its sibling reasons — confirm intended.
	TaskRunReasonResolvingTaskRef = "ResolvingTaskRef"
	// TaskRunReasonResolvingStepActionRef indicates that the TaskRun is waiting for
	// its StepAction's Ref to be asynchronously resolved.
	// NOTE(review): untyped (string) unlike its sibling reasons — confirm intended.
	TaskRunReasonResolvingStepActionRef = "ResolvingStepActionRef"
	// TaskRunReasonImagePullFailed is the reason set when the step of a task fails due to image not being pulled
	TaskRunReasonImagePullFailed TaskRunReason = "TaskRunImagePullFailed"
	// TaskRunReasonResultLargerThanAllowedLimit is the reason set when one of the results exceeds its maximum allowed limit of 1 KB
	TaskRunReasonResultLargerThanAllowedLimit TaskRunReason = "TaskRunResultLargerThanAllowedLimit"
	// TaskRunReasonStopSidecarFailed indicates that the sidecar is not properly stopped.
	TaskRunReasonStopSidecarFailed TaskRunReason = "TaskRunStopSidecarFailed"
	// TaskRunReasonInvalidParamValue indicates that the TaskRun Param input value is not allowed.
	TaskRunReasonInvalidParamValue TaskRunReason = "InvalidParamValue"
	// TaskRunReasonFailedResolution indicates that the reason for failure status is
	// that references within the TaskRun could not be resolved
	TaskRunReasonFailedResolution TaskRunReason = "TaskRunResolutionFailed"
	// TaskRunReasonFailedValidation indicates that the reason for failure status is
	// that taskrun failed runtime validation
	TaskRunReasonFailedValidation TaskRunReason = "TaskRunValidationFailed"
	// TaskRunReasonTaskFailedValidation indicates that the reason for failure status is
	// that task failed runtime validation
	TaskRunReasonTaskFailedValidation TaskRunReason = "TaskValidationFailed"
	// TaskRunReasonResourceVerificationFailed indicates that the task fails the trusted resource verification,
	// it could be the content has changed, signature is invalid or public key is invalid
	TaskRunReasonResourceVerificationFailed TaskRunReason = "ResourceVerificationFailed"
	// TaskRunReasonFailureIgnored is the reason set when the Taskrun has failed due to pod execution error and the failure is ignored for the owning PipelineRun.
	// TaskRuns failed due to reconciler/validation error should not use this reason.
	TaskRunReasonFailureIgnored TaskRunReason = "FailureIgnored"
)
// String returns the reason as a plain string, for use in condition
// fields and log messages.
func (t TaskRunReason) String() string {
	return string(t)
}
// GetStartedReason returns the reason set to the "Succeeded" condition when
// InitializeConditions is invoked
func (trs *TaskRunStatus) GetStartedReason() string {
	return TaskRunReasonStarted.String()
}

// GetRunningReason returns the reason set to the "Succeeded" condition when
// the TaskRun starts running. This is used to indicate that the resource
// could be validated and is starting to perform its job.
func (trs *TaskRunStatus) GetRunningReason() string {
	return TaskRunReasonRunning.String()
}
// MarkResourceOngoing sets the ConditionSucceeded condition to ConditionUnknown
// with the reason and message.
func (trs *TaskRunStatus) MarkResourceOngoing(reason TaskRunReason, message string) {
	cond := apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionUnknown,
		Reason:  reason.String(),
		Message: message,
	}
	taskRunCondSet.Manage(trs).SetCondition(cond)
}
// MarkResourceFailed sets the ConditionSucceeded condition to ConditionFalse
// based on an error that occurred and a reason
func (trs *TaskRunStatus) MarkResourceFailed(reason TaskRunReason, err error) {
	manager := taskRunCondSet.Manage(trs)
	manager.SetCondition(apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionFalse,
		Reason:  reason.String(),
		Message: pipelineErrors.GetErrorMessage(err),
	})
	// Stamp completion with the moment the terminal condition transitioned.
	succeeded := trs.GetCondition(apis.ConditionSucceeded)
	trs.CompletionTime = &succeeded.LastTransitionTime.Inner
}
// RetriesStatus holds one TaskRunStatus per prior retry attempt.
// +listType=atomic
type RetriesStatus []TaskRunStatus

// TaskRunStatusFields holds the fields of TaskRun's status. This is defined
// separately and inlined so that other types can readily consume these fields
// via duck typing.
type TaskRunStatusFields struct {
	// PodName is the name of the pod responsible for executing this task's steps.
	PodName string `json:"podName"`
	// StartTime is the time the build is actually started.
	StartTime *metav1.Time `json:"startTime,omitempty"`
	// CompletionTime is the time the build completed.
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
	// Steps describes the state of each build step container.
	// +optional
	// +listType=atomic
	Steps []StepState `json:"steps,omitempty"`
	// RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures.
	// All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant.
	// (NOTE(review): "no date" reads oddly — presumably nested retry history
	// is stripped from each entry; confirm against the reconciler.)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	RetriesStatus RetriesStatus `json:"retriesStatus,omitempty"`
	// Results are the list of results written out by the task's containers
	// +optional
	// +listType=atomic
	Results []TaskRunResult `json:"results,omitempty"`
	// Artifacts are the list of artifacts written out by the task's containers
	// +optional
	Artifacts *Artifacts `json:"artifacts,omitempty"`
	// The list has one entry per sidecar in the manifest. Each entry
	// represents the imageid of the corresponding sidecar.
	// +listType=atomic
	Sidecars []SidecarState `json:"sidecars,omitempty"`
	// TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.
	TaskSpec *TaskSpec `json:"taskSpec,omitempty"`
	// Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).
	// +optional
	Provenance *Provenance `json:"provenance,omitempty"`
	// SpanContext contains tracing span context fields
	SpanContext map[string]string `json:"spanContext,omitempty"`
}
// TaskRunStepSpec is used to override the values of a Step in the corresponding Task.
type TaskRunStepSpec struct {
	// The name of the Step to override.
	Name string `json:"name"`
	// The resource requirements to apply to the Step.
	ComputeResources corev1.ResourceRequirements `json:"computeResources"`
}

// TaskRunSidecarSpec is used to override the values of a Sidecar in the corresponding Task.
type TaskRunSidecarSpec struct {
	// The name of the Sidecar to override.
	Name string `json:"name"`
	// The resource requirements to apply to the Sidecar.
	ComputeResources corev1.ResourceRequirements `json:"computeResources"`
}
// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*TaskRun) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind(pipeline.TaskRunControllerName)
}

// GetStatusCondition returns the task run status as a ConditionAccessor
func (tr *TaskRun) GetStatusCondition() apis.ConditionAccessor {
	return &tr.Status
}

// GetCondition returns the Condition matching the given type.
func (trs *TaskRunStatus) GetCondition(t apis.ConditionType) *apis.Condition {
	// Delegates to the shared condition set so lookups stay consistent with
	// the setters (MarkResourceOngoing/MarkResourceFailed) in this file.
	return taskRunCondSet.Manage(trs).GetCondition(t)
}
// InitializeConditions will set all conditions in taskRunCondSet to unknown for the TaskRun
// and set the started time to the current time
func (trs *TaskRunStatus) InitializeConditions() {
	// A zero (or nil) StartTime means this is the first initialization.
	justStarted := trs.StartTime.IsZero()
	if justStarted {
		trs.StartTime = &metav1.Time{Time: time.Now()}
	}
	manager := taskRunCondSet.Manage(trs)
	manager.InitializeConditions()
	// On first initialization, stamp the "Succeeded" condition with the
	// Started reason.
	if justStarted {
		cond := manager.GetCondition(apis.ConditionSucceeded)
		cond.Reason = TaskRunReasonStarted.String()
		manager.SetCondition(*cond)
	}
}
// SetCondition sets the condition, unsetting previous conditions with the same
// type as necessary.
func (trs *TaskRunStatus) SetCondition(newCond *apis.Condition) {
	// A nil condition is a no-op.
	if newCond == nil {
		return
	}
	taskRunCondSet.Manage(trs).SetCondition(*newCond)
}
// StepState reports the results of running a step in a Task.
type StepState struct {
	corev1.ContainerState `json:",inline"`
	// Name of the step.
	Name string `json:"name,omitempty"`
	// Container is the name of the container that ran the step.
	Container string `json:"container,omitempty"`
	// ImageID is the image ID reported for the step's container.
	ImageID string `json:"imageID,omitempty"`
	// Results emitted by this step.
	Results []TaskRunStepResult `json:"results,omitempty"`
	// Provenance metadata for this step, if any.
	Provenance *Provenance `json:"provenance,omitempty"`
	// TerminationReason records why the step container terminated.
	TerminationReason string `json:"terminationReason,omitempty"`
	// Inputs and Outputs are the artifacts consumed/produced by this step.
	Inputs  []TaskRunStepArtifact `json:"inputs,omitempty"`
	Outputs []TaskRunStepArtifact `json:"outputs,omitempty"`
}

// SidecarState reports the results of running a sidecar in a Task.
type SidecarState struct {
	corev1.ContainerState `json:",inline"`
	// Name of the sidecar.
	Name string `json:"name,omitempty"`
	// Container is the name of the container that ran the sidecar.
	Container string `json:"container,omitempty"`
	// ImageID is the image ID reported for the sidecar's container.
	ImageID string `json:"imageID,omitempty"`
}
// +genclient
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// TaskRun represents a single execution of a Task. TaskRuns are how the steps
// specified in a Task are executed; they specify the parameters and resources
// used to run the steps in a Task.
//
// +k8s:openapi-gen=true
// +kubebuilder:storageversion
type TaskRun struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state of the TaskRun.
	// +optional
	Spec TaskRunSpec `json:"spec,omitempty"`
	// Status is the observed state of the TaskRun.
	// +optional
	Status TaskRunStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// TaskRunList contains a list of TaskRun
type TaskRunList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the collection of TaskRuns in this list.
	Items []TaskRun `json:"items"`
}
// GetPipelineRunPVCName for TaskRun gets pipelinerun
func (tr *TaskRun) GetPipelineRunPVCName() string {
	if tr == nil {
		return ""
	}
	// The PVC is named after the owning PipelineRun, if one exists.
	for _, owner := range tr.GetOwnerReferences() {
		if owner.Kind == pipeline.PipelineRunControllerName {
			return owner.Name + "-pvc"
		}
	}
	return ""
}
// HasPipelineRunOwnerReference returns true of TaskRun has
// owner reference of type PipelineRun
func (tr *TaskRun) HasPipelineRunOwnerReference() bool {
	for _, owner := range tr.GetOwnerReferences() {
		if owner.Kind == pipeline.PipelineRunControllerName {
			return true
		}
	}
	return false
}
// IsDone returns true if the TaskRun's status indicates that it is done.
func (tr *TaskRun) IsDone() bool {
	// Done means the "Succeeded" condition has resolved either way.
	cond := tr.Status.GetCondition(apis.ConditionSucceeded)
	return !cond.IsUnknown()
}
// HasStarted function check whether TaskRun has valid start time set in its status
func (tr *TaskRun) HasStarted() bool {
	start := tr.Status.StartTime
	return start != nil && !start.IsZero()
}
// IsSuccessful returns true if the TaskRun's status indicates that it has succeeded.
func (tr *TaskRun) IsSuccessful() bool {
	if tr == nil {
		return false
	}
	return tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue()
}
// IsFailure returns true if the TaskRun's status indicates that it has
// failed. A nil receiver is treated as not failed.
func (tr *TaskRun) IsFailure() bool {
	if tr == nil {
		return false
	}
	return tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse()
}
// IsCancelled returns true if the TaskRun's spec status is set to the
// Cancelled state.
//
// A nil receiver returns false instead of panicking on the Spec field access,
// keeping this accessor consistent with IsSuccessful and IsFailure.
func (tr *TaskRun) IsCancelled() bool {
	return tr != nil && tr.Spec.Status == TaskRunSpecStatusCancelled
}
// IsRetriable returns true if the TaskRun has not yet used up its configured
// Retries budget.
func (tr *TaskRun) IsRetriable() bool {
	attempted := len(tr.Status.RetriesStatus)
	return attempted < tr.Spec.Retries
}
// HasTimedOut returns true if the TaskRun has been running longer than its
// allowed timeout. A TaskRun that never started never times out, and a
// timeout of 0 (NoTimeoutDuration) disables the check entirely.
func (tr *TaskRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	startTime := tr.Status.StartTime
	if startTime.IsZero() {
		// Not started yet; nothing can have elapsed.
		return false
	}
	allowed := tr.GetTimeout(ctx)
	if allowed == apisconfig.NoTimeoutDuration {
		return false
	}
	return c.Since(startTime.Time) > allowed
}
// GetTimeout returns the timeout for the TaskRun, or the default if not specified
func (tr *TaskRun) GetTimeout(ctx context.Context) time.Duration {
	// Use the platform default if no timeout is set on the spec.
	if tr.Spec.Timeout == nil {
		// DefaultTimeoutMinutes is an integer count of minutes; convert it to
		// a time.Duration explicitly.
		defaultTimeout := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes)
		return defaultTimeout * time.Minute //nolint:durationcheck
	}
	return tr.Spec.Timeout.Duration
}
// GetNamespacedName returns a k8s namespaced name that identifies this TaskRun.
func (tr *TaskRun) GetNamespacedName() types.NamespacedName {
	return types.NamespacedName{
		Namespace: tr.Namespace,
		Name:      tr.Name,
	}
}
// HasVolumeClaimTemplate returns true if the TaskRun declares at least one
// workspace backed by a volumeClaimTemplate, which is used for creating
// PersistentVolumeClaims with an OwnerReference for each run.
func (tr *TaskRun) HasVolumeClaimTemplate() bool {
	for idx := range tr.Spec.Workspaces {
		if tr.Spec.Workspaces[idx].VolumeClaimTemplate != nil {
			return true
		}
	}
	return false
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
"github.com/tektoncd/pipeline/pkg/apis/validate"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
var (
_ apis.Validatable = (*TaskRun)(nil)
_ resourcesemantics.VerbLimited = (*TaskRun)(nil)
)
// SupportedVerbs returns the operations that validation should be called for.
func (tr *TaskRun) SupportedVerbs() []admissionregistrationv1.OperationType {
	verbs := []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
	return verbs
}
// Validate implements apis.Validatable: it checks the TaskRun's metadata and
// then its spec (within a spec-scoped context), returning accumulated errors.
func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError {
	metaErrs := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata")
	specErrs := tr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")
	return metaErrs.Also(specErrs)
}
// Validate validates a TaskRunSpec: exactly one of taskRef/taskSpec must be
// set, and every sub-section (params, workspaces, debug, step/sidecar specs,
// compute resources, status, retries, pod template, timeout) is checked.
func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Validate the spec changes
	errs = errs.Also(ts.ValidateUpdate(ctx))
	// Must have exactly one of taskRef and taskSpec.
	if ts.TaskRef == nil && ts.TaskSpec == nil {
		errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec"))
	}
	if ts.TaskRef != nil && ts.TaskSpec != nil {
		errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec"))
	}
	// Validate TaskRef if it's present.
	if ts.TaskRef != nil {
		errs = errs.Also(ts.TaskRef.Validate(ctx).ViaField("taskRef"))
	}
	// Validate TaskSpec if it's present.
	if ts.TaskSpec != nil {
		// Inline (embedded) task specs can be disabled per resource kind via
		// the disable-inline-spec feature flag (a comma-separated list).
		if slices.Contains(strings.Split(
			config.FromContextOrDefaults(ctx).FeatureFlags.DisableInlineSpec, ","), "taskrun") {
			errs = errs.Also(apis.ErrDisallowedFields("taskSpec"))
		}
		errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec"))
	}
	errs = errs.Also(ValidateParameters(ctx, ts.Params).ViaField("params"))
	// Validate propagated parameters
	errs = errs.Also(ts.validateInlineParameters(ctx))
	errs = errs.Also(ValidateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces"))
	if ts.Debug != nil {
		// Debug is gated behind the alpha API fields feature flag.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug"))
		errs = errs.Also(validateDebug(ts.Debug).ViaField("debug"))
	}
	if ts.StepSpecs != nil {
		// Step-level overrides are gated behind the beta API fields flag.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepSpecs", config.BetaAPIFields).ViaField("stepSpecs"))
		errs = errs.Also(validateStepSpecs(ts.StepSpecs).ViaField("stepSpecs"))
	}
	if ts.SidecarSpecs != nil {
		// Sidecar-level overrides are gated behind the beta API fields flag.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarSpecs", config.BetaAPIFields).ViaField("sidecarSpecs"))
		errs = errs.Also(validateSidecarSpecs(ts.SidecarSpecs).ViaField("sidecarSpecs"))
	}
	if ts.ComputeResources != nil {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "computeResources", config.BetaAPIFields).ViaField("computeResources"))
		errs = errs.Also(validateTaskRunComputeResources(ts.ComputeResources, ts.StepSpecs))
	}
	// "Cancelled" is the only user-settable value for spec.status.
	if ts.Status != "" {
		if ts.Status != TaskRunSpecStatusCancelled {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be %s", ts.Status, TaskRunSpecStatusCancelled), "status"))
		}
	}
	// statusMessage only makes sense when status is also set.
	if ts.Status == "" {
		if ts.StatusMessage != "" {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("statusMessage should not be set if status is not set, but it is currently set to %s", ts.StatusMessage), "statusMessage"))
		}
	}
	if ts.Retries < 0 {
		errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%d should be >= 0", ts.Retries), "retries"))
	}
	if ts.PodTemplate != nil {
		errs = errs.Also(validatePodTemplateEnv(ctx, *ts.PodTemplate))
	}
	if ts.Timeout != nil && ts.Timeout.Duration < 0 {
		errs = errs.Also(apis.ErrInvalidValue(ts.Timeout.Duration.String()+" should be >= 0", "timeout"))
	}
	return errs
}
// ValidateUpdate validates an update to a TaskRunSpec against the baseline
// object carried in ctx. Rules enforced: managedBy is immutable; once a
// TaskRun is complete no spec changes are allowed (modulo defaulting
// differences after an upgrade); while it is running, only status and
// statusMessage may change.
func (ts *TaskRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
	// Only meaningful on update requests that carry a baseline.
	if !apis.IsInUpdate(ctx) {
		return
	}
	oldObj, ok := apis.GetBaseline(ctx).(*TaskRun)
	if !ok || oldObj == nil {
		return
	}
	// managedBy is immutable: it may not be added, removed, or changed.
	if (oldObj.Spec.ManagedBy == nil) != (ts.ManagedBy == nil) || (oldObj.Spec.ManagedBy != nil && *oldObj.Spec.ManagedBy != *ts.ManagedBy) {
		errs = errs.Also(apis.ErrInvalidValue("managedBy is immutable", "spec.managedBy"))
	}
	if oldObj.IsDone() {
		// try comparing without any copying first
		// this handles the common case where only finalizers changed
		if equality.Semantic.DeepEqual(&oldObj.Spec, ts) {
			return nil // Specs identical, allow update
		}
		// Specs differ, this could be due to different defaults after upgrade
		// Apply current defaults to old spec to normalize
		oldCopy := oldObj.Spec.DeepCopy()
		oldCopy.SetDefaults(ctx)
		if equality.Semantic.DeepEqual(oldCopy, ts) {
			return nil // Difference was only defaults, allow update
		}
		// Real spec changes detected, reject update
		errs = errs.Also(apis.ErrInvalidValue("Once the TaskRun is complete, no updates are allowed", ""))
		return errs
	}
	// Started but not done: copy the old spec, overwrite the fields that are
	// allowed to change, then require everything else to be identical.
	old := oldObj.Spec.DeepCopy()
	old.Status = ts.Status
	old.StatusMessage = ts.StatusMessage
	old.ManagedBy = ts.ManagedBy // Already tested before
	if !equality.Semantic.DeepEqual(old, ts) {
		errs = errs.Also(apis.ErrInvalidValue("Once the TaskRun has started, only status and statusMessage updates are allowed", ""))
	}
	return
}
// validateInlineParameters validates that any parameters called in the
// Task spec are declared in the TaskRun.
// This is crucial for propagated parameters because the parameters could
// be defined under taskRun and then called directly in the task steps.
// In this case, parameters cannot be validated by the underlying taskSpec
// since they may not have the parameters declared because of propagation.
func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis.FieldError) {
	if ts.TaskSpec == nil {
		return errs
	}
	// Build a merged view of parameter declarations: run-level values first,
	// then declarations from the embedded TaskSpec layered on top.
	paramSpecForValidation := make(map[string]ParamSpec)
	for _, p := range ts.Params {
		paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation)
	}
	for _, p := range ts.TaskSpec.Params {
		var err *apis.FieldError
		paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation)
		if err != nil {
			errs = errs.Also(err)
		}
	}
	var paramSpec []ParamSpec
	for _, v := range paramSpecForValidation {
		paramSpec = append(paramSpec, v)
	}
	// ts.TaskSpec is guaranteed non-nil here by the early return above, so
	// only the presence of steps needs checking (the original re-checked
	// TaskSpec != nil redundantly).
	if ts.TaskSpec.Steps != nil {
		errs = errs.Also(ValidateParameterTypes(ctx, paramSpec))
		errs = errs.Also(ValidateParameterVariables(ctx, ts.TaskSpec.Steps, paramSpec))
		errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, ts.TaskSpec.Steps, paramSpec))
	}
	return errs
}
// validatePodTemplateEnv rejects any pod-template env var whose name appears
// in the cluster's configured default-forbidden-env list.
func validatePodTemplateEnv(ctx context.Context, podTemplate pod.Template) (errs *apis.FieldError) {
	forbidden := config.FromContextOrDefaults(ctx).Defaults.DefaultForbiddenEnv
	if len(forbidden) == 0 {
		// Nothing is forbidden; skip the scan.
		return errs
	}
	for _, envVar := range podTemplate.Env {
		if slices.Contains(forbidden, envVar.Name) {
			errs = errs.Also(apis.ErrInvalidValue("PodTemplate cannot update a forbidden env: "+envVar.Name, "PodTemplate.Env"))
		}
	}
	return errs
}
// createParamSpecFromParam adds a ParamSpec synthesized from the runtime
// Param p into paramSpecForValidation (keyed by parameter name) and returns
// the map. For object-valued params, a string-typed PropertySpec is created
// per key so propagated object parameters can be validated.
func createParamSpecFromParam(p Param, paramSpecForValidation map[string]ParamSpec) map[string]ParamSpec {
	value := p.Value
	pSpec := ParamSpec{
		Name:    p.Name,
		Default: &value,
		Type:    p.Value.Type,
	}
	if p.Value.ObjectVal != nil {
		// The real property types are unknown at this point, so every key is
		// assumed to be a string. (The original allocated Properties twice;
		// the dead first allocation is removed.)
		props := make(map[string]PropertySpec, len(p.Value.ObjectVal))
		for k := range p.Value.ObjectVal {
			props[k] = PropertySpec{Type: ParamTypeString}
		}
		pSpec.Properties = props
	}
	paramSpecForValidation[p.Name] = pSpec
	return paramSpecForValidation
}
// combineParamSpec merges a ParamSpec declared on the embedded TaskSpec into
// the map previously seeded from the TaskRun's params (see
// createParamSpecFromParam). When both declare the same parameter, object
// defaults are merged and the TaskSpec's Properties replace the synthesized
// ones; object-typed parameters must declare Properties. Returns the updated
// map and any validation error.
func combineParamSpec(p ParamSpec, paramSpecForValidation map[string]ParamSpec) (map[string]ParamSpec, *apis.FieldError) {
	if pSpec, ok := paramSpecForValidation[p.Name]; ok {
		// Merge defaults with provided values in the taskrun.
		if p.Default != nil && p.Default.ObjectVal != nil {
			for k, v := range p.Default.ObjectVal {
				if pSpec.Default.ObjectVal == nil {
					pSpec.Default.ObjectVal = map[string]string{k: v}
				} else {
					pSpec.Default.ObjectVal[k] = v
				}
			}
			// If Default values of object type are provided then Properties must also be fully declared.
			if p.Properties == nil {
				return paramSpecForValidation, apis.ErrMissingField(p.Name + ".properties")
			}
		}
		// Properties must be defined if paramSpec is of object Type
		if pSpec.Type == ParamTypeObject {
			if p.Properties == nil {
				return paramSpecForValidation, apis.ErrMissingField(p.Name + ".properties")
			}
			// Expect Properties to be complete
			pSpec.Properties = p.Properties
		}
		paramSpecForValidation[p.Name] = pSpec
	} else {
		// No values provided by task run but found a paramSpec declaration.
		// Expect it to be fully speced out.
		paramSpecForValidation[p.Name] = p
	}
	return paramSpecForValidation, nil
}
// validateDebug validates the debug section of the TaskRun:
// if breakpoints are set, onFailure must be exactly "enabled", and each
// beforeSteps entry must be unique.
func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) {
	if db == nil || db.Breakpoints == nil {
		return errs
	}
	switch db.Breakpoints.OnFailure {
	case "":
		errs = errs.Also(apis.ErrInvalidValue("onFailure breakpoint is empty, it is only allowed to be set as enabled", "breakpoints.onFailure"))
	case EnabledOnFailureBreakpoint:
		// valid value; nothing to report
	default:
		errs = errs.Also(apis.ErrInvalidValue(db.Breakpoints.OnFailure+" is not a valid onFailure breakpoint value, onFailure breakpoint is only allowed to be set as enabled", "breakpoints.onFailure"))
	}
	seen := sets.NewString()
	for idx, step := range db.Breakpoints.BeforeSteps {
		if seen.Has(step) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("before step must be unique, the same step: %s is defined multiple times at", step), fmt.Sprintf("breakpoints.beforeSteps[%d]", idx)))
		}
		seen.Insert(step)
	}
	return errs
}
// ValidateWorkspaceBindings makes sure the volumes provided for the Task's
// declared workspaces make sense, and that no workspace name is repeated.
func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) {
	names := make([]string, 0, len(wb))
	for idx, binding := range wb {
		names = append(names, binding.Name)
		errs = errs.Also(binding.Validate(ctx).ViaIndex(idx))
	}
	return errs.Also(validateNoDuplicateNames(names, true))
}
// ValidateParameters makes sure the params for the Task are valid, i.e. that
// no parameter name is declared more than once (case-insensitively).
func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) {
	names := make([]string, 0, len(params))
	for _, param := range params {
		names = append(names, param.Name)
	}
	return errs.Also(validateNoDuplicateNames(names, false))
}
// validateStepSpecs checks that every step spec is named and that the names
// are unique (case-insensitively).
func validateStepSpecs(specs []TaskRunStepSpec) (errs *apis.FieldError) {
	names := make([]string, 0, len(specs))
	for idx, spec := range specs {
		if spec.Name == "" {
			errs = errs.Also(apis.ErrMissingField("name").ViaIndex(idx))
			continue
		}
		names = append(names, spec.Name)
	}
	return errs.Also(validateNoDuplicateNames(names, true))
}
// validateTaskRunComputeResources ensures that compute resources are not
// configured at both the step level (stepSpecs) and the task level.
// Returns an ErrMultipleOneOf on the first conflicting step spec.
func validateTaskRunComputeResources(computeResources *corev1.ResourceRequirements, specs []TaskRunStepSpec) (errs *apis.FieldError) {
	// Without task-level resources there is nothing to conflict with; the
	// original looped over every spec even in this case.
	if computeResources == nil {
		return nil
	}
	for _, spec := range specs {
		if spec.ComputeResources.Size() != 0 {
			return apis.ErrMultipleOneOf(
				"stepSpecs.resources",
				"computeResources",
			)
		}
	}
	return nil
}
// validateSidecarSpecs checks that every sidecar spec is named and that the
// names are unique (case-insensitively).
func validateSidecarSpecs(specs []TaskRunSidecarSpec) (errs *apis.FieldError) {
	names := make([]string, 0, len(specs))
	for idx, spec := range specs {
		if spec.Name == "" {
			errs = errs.Also(apis.ErrMissingField("name").ViaIndex(idx))
			continue
		}
		names = append(names, spec.Name)
	}
	return errs.Also(validateNoDuplicateNames(names, true))
}
// validateNoDuplicateNames returns an error for each name that is repeated in
// names. Comparison is case-insensitive. If byIndex is true, the error is
// reported by index instead of by key.
func validateNoDuplicateNames(names []string, byIndex bool) (errs *apis.FieldError) {
	seen := sets.NewString()
	for idx, name := range names {
		lower := strings.ToLower(name)
		if seen.Has(lower) {
			if byIndex {
				errs = errs.Also(apis.ErrMultipleOneOf("name").ViaIndex(idx))
			} else {
				errs = errs.Also(apis.ErrMultipleOneOf("name").ViaKey(name))
			}
		}
		seen.Insert(lower)
	}
	return errs
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"github.com/google/go-cmp/cmp"
)
// Algorithm Standard cryptographic hash algorithm
type Algorithm string

// Artifact represents an artifact within a system, potentially containing multiple values
// associated with it.
type Artifact struct {
	// The artifact's identifying category name
	Name string `json:"name,omitempty"`
	// A collection of values related to the artifact
	Values []ArtifactValue `json:"values,omitempty"`
	// Indicate if the artifact is a build output or a by-product
	BuildOutput bool `json:"buildOutput,omitempty"`
}

// ArtifactValue represents a specific value or data element within an Artifact.
type ArtifactValue struct {
	Digest map[Algorithm]string `json:"digest,omitempty"` // Algorithm-specific digests for verifying the content (e.g., SHA256)
	Uri    string               `json:"uri,omitempty"`    // Location where the artifact value can be retrieved
}

// TaskRunStepArtifact represents an artifact produced or used by a step within a task run.
// It directly uses the Artifact type for its structure.
type TaskRunStepArtifact = Artifact

// Artifacts represents the collection of input and output artifacts associated with
// a task run or a similar process. Artifacts in this context are units of data or resources
// that the process either consumes as input or produces as output.
type Artifacts struct {
	// Inputs are artifacts consumed by the run.
	Inputs []Artifact `json:"inputs,omitempty"`
	// Outputs are artifacts produced by the run.
	Outputs []Artifact `json:"outputs,omitempty"`
}
// Merge folds another Artifacts collection into the receiver in place,
// deduplicating individual ArtifactValues (compared with cmp.Equal) under
// each artifact name. For outputs, BuildOutput is "sticky": once any merged
// artifact reports true it is never reset to false.
//
// NOTE(review): the rebuilt Inputs/Outputs slices are produced by iterating
// Go maps, so their element order is not deterministic across calls.
func (a *Artifacts) Merge(another *Artifacts) {
	inputMap := make(map[string][]ArtifactValue)
	var newInputs []Artifact
	// Seed the map with the receiver's existing inputs.
	for _, v := range a.Inputs {
		inputMap[v.Name] = v.Values
	}
	if another != nil {
		for _, v := range another.Inputs {
			_, ok := inputMap[v.Name]
			if !ok {
				inputMap[v.Name] = []ArtifactValue{}
			}
			// Append only values not already present under this name.
			for _, vv := range v.Values {
				exists := false
				for _, av := range inputMap[v.Name] {
					if cmp.Equal(vv, av) {
						exists = true
						break
					}
				}
				if !exists {
					inputMap[v.Name] = append(inputMap[v.Name], vv)
				}
			}
		}
	}
	for k, v := range inputMap {
		newInputs = append(newInputs, Artifact{
			Name:   k,
			Values: v,
		})
	}

	outputMap := make(map[string]Artifact)
	var newOutputs []Artifact
	// Seed the map with the receiver's existing outputs.
	for _, v := range a.Outputs {
		outputMap[v.Name] = v
	}
	if another != nil {
		for _, v := range another.Outputs {
			_, ok := outputMap[v.Name]
			if !ok {
				outputMap[v.Name] = Artifact{Name: v.Name, Values: []ArtifactValue{}, BuildOutput: v.BuildOutput}
			}
			// only update buildOutput to true.
			// Do not convert to false if it was true before.
			if v.BuildOutput {
				art := outputMap[v.Name]
				art.BuildOutput = v.BuildOutput
				outputMap[v.Name] = art
			}
			// Append only values not already present under this name.
			for _, vv := range v.Values {
				exists := false
				for _, av := range outputMap[v.Name].Values {
					if cmp.Equal(vv, av) {
						exists = true
						break
					}
				}
				if !exists {
					art := outputMap[v.Name]
					art.Values = append(art.Values, vv)
					outputMap[v.Name] = art
				}
			}
		}
	}
	for _, v := range outputMap {
		newOutputs = append(newOutputs, Artifact{
			Name:        v.Name,
			Values:      v.Values,
			BuildOutput: v.BuildOutput,
		})
	}
	a.Inputs = newInputs
	a.Outputs = newOutputs
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"encoding/json"
"strings"
)
// ParamType indicates the type of an input parameter;
// Used to distinguish between a single string and an array of strings.
type ParamType string

// Valid ParamTypes:
const (
	ParamTypeString ParamType = "string"
	ParamTypeArray  ParamType = "array"
	ParamTypeObject ParamType = "object"
)

// AllParamTypes can be used for ParamType validation.
var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject}

// ParamValue is modeled after IntOrString in kubernetes/apimachinery: a type
// that can hold a single string, a string array, or a string map. Used in
// JSON unmarshalling so that a single JSON field can accept any of those
// shapes.
type ParamValue struct {
	Type      ParamType // Represents the stored type of ParamValues.
	StringVal string
	// +listType=atomic
	ArrayVal  []string
	ObjectVal map[string]string
}

// PropertySpec defines the struct for object keys
type PropertySpec struct {
	Type ParamType `json:"type,omitempty"`
}

// ParamsPrefix is the prefix used in $(...) expressions referring to parameters
const ParamsPrefix = "params"

// ArrayReference returns the name of the parameter from an array parameter
// reference: "$(params.arrayParam[*])" yields "arrayParam".
func ArrayReference(a string) string {
	return strings.TrimSuffix(strings.TrimPrefix(a, "$("+ParamsPrefix+"."), "[*])")
}

// UnmarshalJSON implements the json.Unmarshaller interface.
// JSON arrays decode to ParamTypeArray, JSON objects to ParamTypeObject, and
// everything else (including unsupported nested shapes) falls back to
// ParamTypeString, keeping the raw bytes when the payload is not a valid
// JSON string.
func (paramValues *ParamValue) UnmarshalJSON(value []byte) error {
	// ParamValues is used for Results Value as well; results can carry any
	// kind of data, so an empty payload must be tolerated.
	if len(value) == 0 {
		paramValues.Type = ParamTypeString
		return nil
	}
	if value[0] == '[' {
		// We're trying to Unmarshal to []string, but for cases like []int or
		// other kinds of nested array we don't support yet, we continue and
		// fall through to the string path below. If the resulting Type doesn't
		// match what it should be, validation in the reconciler catches it.
		var a []string
		if err := json.Unmarshal(value, &a); err == nil {
			paramValues.Type = ParamTypeArray
			paramValues.ArrayVal = a
			return nil
		}
	}
	if value[0] == '{' {
		// If unmarshalling to a string map fails, fall through to the string
		// path below.
		var m map[string]string
		if err := json.Unmarshal(value, &m); err == nil {
			paramValues.Type = ParamTypeObject
			paramValues.ObjectVal = m
			return nil
		}
	}
	// By default we unmarshal to string.
	// BUGFIX: the argument here was garbled to "¶mValues.StringVal" (an
	// HTML-entity mangling of "&para"); it must be &paramValues.StringVal.
	paramValues.Type = ParamTypeString
	if err := json.Unmarshal(value, &paramValues.StringVal); err == nil {
		return nil
	}
	// Not a valid JSON string at all: keep the raw bytes verbatim.
	paramValues.StringVal = string(value)
	return nil
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import "strings"
// TaskResult used to describe the results of a task
type TaskResult struct {
	// Name the given name
	Name string `json:"name"`
	// Type is the user-specified type of the result. The possible type
	// is currently "string" and will support "array" in following work.
	// +optional
	Type ResultsType `json:"type,omitempty"`
	// Properties is the JSON Schema properties to support key-value pairs results.
	// +optional
	Properties map[string]PropertySpec `json:"properties,omitempty"`
	// Description is a human-readable description of the result
	// +optional
	Description string `json:"description,omitempty"`
	// Value the expression used to retrieve the value of the result from an underlying Step.
	// +optional
	Value *ResultValue `json:"value,omitempty"`
}

// StepResult used to describe the Results of a Step.
type StepResult struct {
	// Name the given name
	Name string `json:"name"`
	// The possible types are 'string', 'array', and 'object', with 'string' as the default.
	// +optional
	Type ResultsType `json:"type,omitempty"`
	// Properties is the JSON Schema properties to support key-value pairs results.
	// +optional
	Properties map[string]PropertySpec `json:"properties,omitempty"`
	// Description is a human-readable description of the result
	// +optional
	Description string `json:"description,omitempty"`
}

// TaskRunResult used to describe the results of a task
type TaskRunResult struct {
	// Name the given name
	Name string `json:"name"`
	// Type is the user-specified type of the result. The possible type
	// is currently "string" and will support "array" in following work.
	// +optional
	Type ResultsType `json:"type,omitempty"`
	// Value the given value of the result
	Value ResultValue `json:"value"`
}

// TaskRunStepResult is a type alias of TaskRunResult
type TaskRunStepResult = TaskRunResult

// ResultValue is a type alias of ParamValue
type ResultValue = ParamValue

// ResultsType indicates the type of a result;
// Used to distinguish between a single string and an array of strings.
// Note that there is ResultType used to find out whether a
// RunResult is from a task result or not, which is different from
// this ResultsType.
type ResultsType string

// Valid ResultsType:
const (
	ResultsTypeString ResultsType = "string"
	ResultsTypeArray  ResultsType = "array"
	ResultsTypeObject ResultsType = "object"
)

// AllResultsTypes can be used for ResultsTypes validation.
var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject}
// ResultsArrayReference returns the reference of a result, e.g.
// "results.resultname" from "$(results.resultname[*])".
func ResultsArrayReference(a string) string {
	inner := strings.TrimPrefix(a, "$(")
	inner = strings.TrimSuffix(inner, ")")
	return strings.TrimSuffix(inner, "[*]")
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"regexp"
"strings"
)
const (
	// TODO(#2462) use one regex across all substitutions
	// variableSubstitutionFormat matches expressions such as $(result.resultname),
	// $(result.resultname[int]) and $(result.resultname[*]).
	variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)`
)

// VariableSubstitutionRegex is a regex to find all result matching substitutions
var VariableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat)

// stripVarSubExpression removes the leading "$(" and trailing ")" from a
// substitution expression.
func stripVarSubExpression(expression string) string {
	inner := strings.TrimPrefix(expression, "$(")
	return strings.TrimSuffix(inner, ")")
}

// validateString returns every substitution expression found in value with
// its "$(" / ")" wrapper removed, or nil when value contains none.
func validateString(value string) []string {
	matches := VariableSubstitutionRegex.FindAllString(value, -1)
	if matches == nil {
		return nil
	}
	stripped := make([]string, 0, len(matches))
	for _, match := range matches {
		stripped = append(stripped, stripVarSubExpression(match))
	}
	return stripped
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"fmt"
"github.com/tektoncd/pipeline/pkg/substitution"
"k8s.io/apimachinery/pkg/selection"
)
// WhenExpression allows a PipelineTask to declare expressions to be evaluated before the Task is run
// to determine whether the Task should be executed or skipped
type WhenExpression struct {
	// Input is the string for guard checking which can be a static input or an output from a parent Task
	Input string `json:"input,omitempty"`
	// Operator that represents an Input's relationship to the values
	Operator selection.Operator `json:"operator,omitempty"`
	// Values is an array of strings, which is compared against the input, for guard checking
	// It must be non-empty
	// +listType=atomic
	Values []string `json:"values,omitempty"`
	// CEL is a string of Common Language Expression, which can be used to conditionally execute
	// the task based on the result of the expression evaluation
	// More info about CEL syntax: https://github.com/google/cel-spec/blob/master/doc/langdef.md
	// +optional
	CEL string `json:"cel,omitempty"`
}
// isInputInValues reports whether Input matches any entry in Values exactly.
func (we *WhenExpression) isInputInValues() bool {
	for _, candidate := range we.Values {
		if candidate == we.Input {
			return true
		}
	}
	return false
}
// isTrue evaluates the expression: membership for selection.In, absence for
// any other operator (i.e. selection.NotIn).
func (we *WhenExpression) isTrue() bool {
	contained := we.isInputInValues()
	if we.Operator == selection.In {
		return contained
	}
	// selection.NotIn
	return !contained
}
// applyReplacements returns a copy of the WhenExpression with parameter and
// result references substituted in Input, CEL and Values. Values referencing
// an array param/result present in arrayReplacements expand into multiple
// entries; everything else gets plain string replacement.
func (we *WhenExpression) applyReplacements(replacements map[string]string, arrayReplacements map[string][]string) WhenExpression {
	out := WhenExpression{
		Input:    substitution.ApplyReplacements(we.Input, replacements),
		Operator: we.Operator,
		CEL:      substitution.ApplyReplacements(we.CEL, replacements),
	}
	for _, val := range we.Values {
		// Array params are referenced as $(params.name[*]) and array results
		// as $(results.name[*]); when the referenced name has an entry in
		// arrayReplacements, the single value expands into a list.
		paramKey := fmt.Sprintf("%s.%s", ParamsPrefix, ArrayReference(val))
		_, isArrayParam := arrayReplacements[paramKey]
		_, isArrayResult := arrayReplacements[ResultsArrayReference(val)]
		if isArrayParam || isArrayResult {
			out.Values = append(out.Values, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...)
		} else {
			out.Values = append(out.Values, substitution.ApplyReplacements(val, replacements))
		}
	}
	return out
}
// GetVarSubstitutionExpressions extracts all the values between "$(" and ")"
// found in the Input, CEL and Values of a When Expression, and reports
// whether any were found.
func (we *WhenExpression) GetVarSubstitutionExpressions() ([]string, bool) {
	exprs := validateString(we.Input)
	exprs = append(exprs, validateString(we.CEL)...)
	for _, value := range we.Values {
		exprs = append(exprs, validateString(value)...)
	}
	return exprs, len(exprs) > 0
}
// WhenExpressions are used to specify whether a Task should be executed or skipped
// All of them need to evaluate to True for a guarded Task to be executed.
type WhenExpressions []WhenExpression

// StepWhenExpressions applies the same guard semantics at the Step level.
type StepWhenExpressions = WhenExpressions
// AllowsExecution evaluates an Input's relationship to an array of Values,
// based on the Operator, to determine whether all the When Expressions are
// True. If they are all True, the guarded Task is executed, otherwise it is
// skipped. Expressions with a CEL string additionally require a true entry in
// evaluatedCEL (keyed by the CEL source).
func (wes WhenExpressions) AllowsExecution(evaluatedCEL map[string]bool) bool {
	for _, we := range wes {
		if !we.isTrue() {
			return false
		}
		if we.CEL != "" && !evaluatedCEL[we.CEL] {
			return false
		}
	}
	return true
}
// ReplaceVariables interpolates variables, such as Parameters and Results, in
// the Input and Values.
//
// NOTE(review): `replaced := wes` aliases the receiver's backing array, so
// the substitution mutates the caller's slice in place as well as returning
// it — confirm callers rely on (or at least tolerate) this before changing.
func (wes WhenExpressions) ReplaceVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions {
	replaced := wes
	for i := range wes {
		replaced[i] = wes[i].applyReplacements(replacements, arrayReplacements)
	}
	return replaced
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"encoding/json"
"fmt"
"github.com/tektoncd/pipeline/pkg/substitution"
"k8s.io/apimachinery/pkg/selection"
)
// WhenExpression allows a PipelineTask to declare expressions to be evaluated before the Task is run
// to determine whether the Task should be executed or skipped
type WhenExpression struct {
	// Input is the string for guard checking which can be a static input or an output from a parent Task
	Input string `json:"input,omitempty"`
	// Operator that represents an Input's relationship to the values
	Operator selection.Operator `json:"operator,omitempty"`
	// Values is an array of strings, which is compared against the input, for guard checking
	// It must be non-empty
	// +listType=atomic
	Values []string `json:"values,omitempty"`
	// CEL is a string of Common Language Expression, which can be used to conditionally execute
	// the task based on the result of the expression evaluation
	// More info about CEL syntax: https://github.com/google/cel-spec/blob/master/doc/langdef.md
	// +optional
	CEL string `json:"cel,omitempty"`
}
// isInputInValues reports whether Input matches any entry of Values.
func (we *WhenExpression) isInputInValues() bool {
	for _, candidate := range we.Values {
		if candidate == we.Input {
			return true
		}
	}
	return false
}
// isTrue evaluates the Input/Operator/Values triple: membership for
// selection.In, non-membership otherwise (the only other valid operator is
// selection.NotIn).
func (we *WhenExpression) isTrue() bool {
	matched := we.isInputInValues()
	if we.Operator == selection.In {
		return matched
	}
	// selection.NotIn
	return !matched
}
// applyReplacements returns a copy of we with variable references in Input,
// CEL, and Values substituted using the given replacement maps; the receiver
// itself is not modified.
func (we *WhenExpression) applyReplacements(replacements map[string]string, arrayReplacements map[string][]string) WhenExpression {
	replacedInput := applyReplacementsAsString(we.Input, replacements, arrayReplacements)
	// CEL only supports plain string substitution (no array expansion).
	replacedCEL := substitution.ApplyReplacements(we.CEL, replacements)
	var replacedValues []string
	for _, val := range we.Values {
		// arrayReplacements holds a list of array parameters with a pattern - params.arrayParam1
		// array params are referenced using $(params.arrayParam1[*])
		// array results are referenced using $(results.resultname[*])
		// check if the param exist in the arrayReplacements to replace it with a list of values
		if _, ok := arrayReplacements[fmt.Sprintf("%s.%s", ParamsPrefix, ArrayReference(val))]; ok {
			// An array param reference expands into multiple values.
			replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...)
		} else if _, ok := arrayReplacements[ResultsArrayReference(val)]; ok {
			// An array result reference likewise expands into multiple values.
			replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...)
		} else {
			// Everything else gets plain string substitution.
			replacedValues = append(replacedValues, substitution.ApplyReplacements(val, replacements))
		}
	}
	return WhenExpression{Input: replacedInput, Operator: we.Operator, Values: replacedValues, CEL: replacedCEL}
}
// applyReplacementsAsString substitutes variables in s and always returns the
// result as a single string. When s references an array parameter
// ($(params.foo[*])) or an array result ($(results.foo[*])), the expanded list
// is JSON-encoded so the caller still receives one string; otherwise plain
// string substitution is applied. On a marshalling error the original input is
// returned unchanged rather than a partial value.
func applyReplacementsAsString(s string, replacements map[string]string, arrayReplacements map[string][]string) string {
	// The two array-reference forms share identical handling; the original
	// code duplicated the marshal logic in two branches.
	_, isArrayParam := arrayReplacements[fmt.Sprintf("%s.%s", ParamsPrefix, ArrayReference(s))]
	_, isArrayResult := arrayReplacements[ResultsArrayReference(s)]
	if isArrayParam || isArrayResult {
		b, err := json.Marshal(substitution.ApplyArrayReplacements(s, replacements, arrayReplacements))
		if err != nil {
			return s
		}
		return string(b)
	}
	return substitution.ApplyReplacements(s, replacements)
}
// GetVarSubstitutionExpressions extracts all the values between "$(" and ")" in a When Expression
func (we *WhenExpression) GetVarSubstitutionExpressions() ([]string, bool) {
	var exprs []string
	// Scan every textual field of the expression for variable references.
	exprs = append(exprs, validateString(we.Input)...)
	exprs = append(exprs, validateString(we.CEL)...)
	for _, v := range we.Values {
		exprs = append(exprs, validateString(v)...)
	}
	return exprs, len(exprs) > 0
}
// WhenExpressions are used to specify whether a Task should be executed or skipped
// All of them need to evaluate to True for a guarded Task to be executed.
type WhenExpressions []WhenExpression

// StepWhenExpressions is an alias of WhenExpressions; the same guard
// semantics apply when expressions are attached to a Step.
type StepWhenExpressions = WhenExpressions
// AllowsExecution evaluates an Input's relationship to an array of Values, based on the Operator,
// to determine whether all the When Expressions are True. If they are all True, the guarded Task is
// executed, otherwise it is skipped.
// If CEL expression exists, AllowsExecution will get the evaluated results from evaluatedCEL and determine
// if the Task should be skipped.
func (wes WhenExpressions) AllowsExecution(evaluatedCEL map[string]bool) bool {
	for _, we := range wes {
		// Deny execution when the structured expression is false, or when a
		// CEL expression is present and did not evaluate to true.
		if !we.isTrue() || (we.CEL != "" && !evaluatedCEL[we.CEL]) {
			return false
		}
	}
	return true
}
// ReplaceVariables interpolates variables, such as Parameters and Results, in
// the Input and Values.
//
// NOTE(review): `replaced := wes` aliases the receiver's backing array, so the
// substitution mutates the receiver in place as well as returning it.
func (wes WhenExpressions) ReplaceVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions {
	replaced := wes
	for i := range wes {
		replaced[i] = wes[i].applyReplacements(replacements, arrayReplacements)
	}
	return replaced
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"fmt"
"strings"
"github.com/google/cel-go/cel"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
)
// validWhenOperators are the only operators accepted in the Operator field of
// a WhenExpression (CEL-based expressions are validated separately).
var validWhenOperators = []string{
	string(selection.In),
	string(selection.NotIn),
}
// validate checks every WhenExpression in the list and reports all errors
// under the "when" field.
func (wes WhenExpressions) validate(ctx context.Context) *apis.FieldError {
	errs := wes.validateWhenExpressionsFields(ctx)
	return errs.ViaField("when")
}
// validateWhenExpressionsFields validates each expression individually,
// attaching its index to any reported error.
func (wes WhenExpressions) validateWhenExpressionsFields(ctx context.Context) (errs *apis.FieldError) {
	for i := range wes {
		errs = errs.Also(wes[i].validateWhenExpressionFields(ctx).ViaIndex(i))
	}
	return errs
}
// validateWhenExpressionFields checks that a single WhenExpression is well
// formed: either a CEL expression (gated behind the enable-cel-in-whenexpression
// feature flag and mutually exclusive with Input/Operator/Values), or an
// Input/Operator/Values triple with a recognized operator and non-empty Values.
func (we *WhenExpression) validateWhenExpressionFields(ctx context.Context) *apis.FieldError {
	// Guard nil/empty before any field access — the original dereferenced
	// we.CEL before performing this check.
	if we == nil || equality.Semantic.DeepEqual(we, &WhenExpression{}) {
		return apis.ErrMissingField(apis.CurrentField)
	}
	if we.CEL != "" {
		if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableCELInWhenExpression {
			return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use CEL: %s in WhenExpression", config.EnableCELInWhenExpression, we.CEL), "")
		}
		if we.Input != "" || we.Operator != "" || len(we.Values) != 0 {
			return apis.ErrGeneric(fmt.Sprintf("cel and input+operator+values cannot be set in one WhenExpression: %v", we))
		}
		// We need to compile the CEL expression and check if it is a valid expression
		// note that at the validation webhook, Tekton's variables are not substituted,
		// so they need to be wrapped with single quotes.
		// e.g. This is a valid CEL expression: '$(params.foo)' == 'foo';
		// But this is not a valid expression since CEL cannot recognize: $(params.foo) == 'foo';
		// This is not valid since we don't pass params to CEL's environment: params.foo == 'foo';
		env, _ := cel.NewEnv()
		if _, iss := env.Compile(we.CEL); iss.Err() != nil {
			// apis.ErrGeneric does not do printf-style interpolation — its
			// variadic arguments are field paths — so format explicitly.
			return apis.ErrGeneric(fmt.Sprintf("invalid cel expression: %s with err: %s", we.CEL, iss.Err().Error()), "")
		}
		return nil
	}
	if !sets.NewString(validWhenOperators...).Has(string(we.Operator)) {
		message := fmt.Sprintf("operator %q is not recognized. valid operators: %s", we.Operator, strings.Join(validWhenOperators, ","))
		return apis.ErrInvalidValue(message, apis.CurrentField)
	}
	if len(we.Values) == 0 {
		return apis.ErrInvalidValue("expecting non-empty values field", apis.CurrentField)
	}
	return nil
}
// validatePipelineParametersVariables checks that every parameter variable
// referenced in the Input and Values of each WhenExpression is declared,
// validating array references as arrays and everything else as strings.
// Errors are attached under when[idx].input / when[idx].values.
func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	for idx, we := range wes {
		errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("input").ViaFieldIndex("when", idx))
		for _, val := range we.Values {
			// one of the values could be a reference to an array param, such as, $(params.foo[*])
			// extract the variable name from the pattern $(params.foo[*]), if the variable name matches with one of the array params
			// validate the param as an array variable otherwise, validate it as a string variable
			if arrayParamNames.Has(ArrayReference(val)) {
				errs = errs.Also(validateArrayVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", idx))
			} else {
				errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", idx))
			}
		}
	}
	return errs
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"path/filepath"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
corev1 "k8s.io/api/core/v1"
)
// WorkspaceDeclaration is a declaration of a volume that a Task requires.
type WorkspaceDeclaration struct {
	// Name is the name by which you can bind the volume at runtime.
	Name string `json:"name"`
	// Description is an optional human readable description of this volume.
	// +optional
	Description string `json:"description,omitempty"`
	// MountPath overrides the directory that the volume will be made available at.
	// When empty, GetMountPath derives a default from the workspace name.
	// +optional
	MountPath string `json:"mountPath,omitempty"`
	// ReadOnly dictates whether a mounted volume is writable. By default this
	// field is false and so mounted volumes are writable.
	ReadOnly bool `json:"readOnly,omitempty"`
	// Optional marks a Workspace as not being required in TaskRuns. By default
	// this field is false and so declared workspaces are required.
	Optional bool `json:"optional,omitempty"`
}
// GetMountPath returns the mountPath for w which is the MountPath if provided or the
// default if not.
func (w *WorkspaceDeclaration) GetMountPath() string {
	if mp := w.MountPath; mp != "" {
		return mp
	}
	// Default: a directory named after the workspace under the workspace root.
	return filepath.Join(pipeline.WorkspaceDir, w.Name)
}
// WorkspaceBinding maps a Task's declared workspace to a Volume.
//
// Exactly one of the volume-source fields below must be set; this is enforced
// by Validate.
type WorkspaceBinding struct {
	// Name is the name of the workspace populated by the volume.
	Name string `json:"name"`
	// SubPath is optionally a directory on the volume which should be used
	// for this binding (i.e. the volume will be mounted at this sub directory).
	// +optional
	SubPath string `json:"subPath,omitempty"`
	// VolumeClaimTemplate is a template for a claim that will be created in the same namespace.
	// The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.
	// See PersistentVolumeClaim (API version: v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
	// PersistentVolumeClaim represents a reference to a
	// PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.
	VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
// WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun
// is expected to populate with a workspace binding.
//
// Deprecated: use PipelineWorkspaceDeclaration type instead
type WorkspacePipelineDeclaration = PipelineWorkspaceDeclaration

// PipelineWorkspaceDeclaration creates a named slot in a Pipeline that a PipelineRun
// is expected to populate with a workspace binding.
type PipelineWorkspaceDeclaration struct {
	// Name is the name of a workspace to be provided by a PipelineRun.
	Name string `json:"name"`
	// Description is a human readable string describing how the workspace will be
	// used in the Pipeline. It can be useful to include a bit of detail about which
	// tasks are intended to have access to the data on the workspace.
	// +optional
	Description string `json:"description,omitempty"`
	// Optional marks a Workspace as not being required in PipelineRuns. By default
	// this field is false and so declared workspaces are required.
	Optional bool `json:"optional,omitempty"`
}

// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be
// mapped to a task's declared workspace.
type WorkspacePipelineTaskBinding struct {
	// Name is the name of the workspace as declared by the task
	Name string `json:"name"`
	// Workspace is the name of the workspace declared by the pipeline
	// +optional
	Workspace string `json:"workspace,omitempty"`
	// SubPath is optionally a directory on the volume which should be used
	// for this binding (i.e. the volume will be mounted at this sub directory).
	// +optional
	SubPath string `json:"subPath,omitempty"`
}

// WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access
// to a Workspace defined in a Task.
type WorkspaceUsage struct {
	// Name is the name of the workspace this Step or Sidecar wants access to.
	Name string `json:"name"`
	// MountPath is the path that the workspace should be mounted to inside the Step or Sidecar,
	// overriding any MountPath specified in the Task's WorkspaceDeclaration.
	MountPath string `json:"mountPath"`
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"context"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/pkg/apis"
)
// allVolumeSourceFields is a list of all the volume source field paths that a
// WorkspaceBinding may include. It is used by Validate to build the
// "exactly one of" error messages, so it must stay in sync with the sources
// counted by numSources.
var allVolumeSourceFields = []string{
	"persistentvolumeclaim",
	"volumeclaimtemplate",
	"emptydir",
	"configmap",
	"secret",
	// numSources also counts these two sources; without them here the
	// ErrMultipleOneOf/ErrMissingOneOf messages omit fields that can
	// actually be (or need to be) set.
	"projected",
	"csi",
}
// Validate looks at the Volume provided in b and makes sure that it is valid.
// This means that only one VolumeSource can be specified, and also that the
// supported VolumeSource is itself valid.
func (b *WorkspaceBinding) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Check the nil receiver before handing b to DeepEqual.
	if b == nil || equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) {
		return apis.ErrMissingField(apis.CurrentField)
	}
	// Exactly one volume source must be configured.
	numSources := b.numSources()
	if numSources > 1 {
		return apis.ErrMultipleOneOf(allVolumeSourceFields...)
	}
	if numSources == 0 {
		return apis.ErrMissingOneOf(allVolumeSourceFields...)
	}
	// For a PersistentVolumeClaim to work, you must at least provide the name of the PVC to use.
	if b.PersistentVolumeClaim != nil && b.PersistentVolumeClaim.ClaimName == "" {
		return apis.ErrMissingField("persistentvolumeclaim.claimname")
	}
	// For a ConfigMap to work, you must provide the name of the ConfigMap to use.
	if b.ConfigMap != nil && b.ConfigMap.LocalObjectReference.Name == "" {
		return apis.ErrMissingField("configmap.name")
	}
	// For a Secret to work, you must provide the name of the Secret to use.
	if b.Secret != nil && b.Secret.SecretName == "" {
		return apis.ErrMissingField("secret.secretName")
	}
	// For a Projected volume to work, you must provide at least one source.
	// (The original duplicated this length check in a redundant nested if.)
	if b.Projected != nil && len(b.Projected.Sources) == 0 {
		return apis.ErrMissingField("projected.sources")
	}
	// For a CSI to work, you must provide and have installed the driver to use.
	if b.CSI != nil && b.CSI.Driver == "" {
		return apis.ErrMissingField("csi.driver")
	}
	return nil
}
// numSources returns the total number of volume sources that this WorkspaceBinding
// has been configured with.
func (b *WorkspaceBinding) numSources() int {
	count := 0
	// Each entry is true when the corresponding volume source is set.
	for _, configured := range []bool{
		b.VolumeClaimTemplate != nil,
		b.PersistentVolumeClaim != nil,
		b.EmptyDir != nil,
		b.ConfigMap != nil,
		b.Secret != nil,
		b.Projected != nil,
		b.CSI != nil,
	} {
		if configured {
			count++
		}
	}
	return count
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
config "github.com/tektoncd/pipeline/pkg/apis/config"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): this section is generated by deepcopy-gen (see the file
// header: "DO NOT EDIT"); regenerate it rather than editing by hand.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifact) DeepCopyInto(out *Artifact) {
	*out = *in
	if in.Values != nil {
		// The in/out shadowing below is a deepcopy-gen idiom: the names are
		// rebound to point at the specific field pair being copied.
		in, out := &in.Values, &out.Values
		*out = make([]ArtifactValue, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
func (in *Artifact) DeepCopy() *Artifact {
	if in == nil {
		return nil
	}
	out := new(Artifact)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ArtifactValue) DeepCopyInto(out *ArtifactValue) {
	*out = *in
	if in.Digest != nil {
		in, out := &in.Digest, &out.Digest
		*out = make(map[Algorithm]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactValue.
func (in *ArtifactValue) DeepCopy() *ArtifactValue {
	if in == nil {
		return nil
	}
	out := new(ArtifactValue)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifacts) DeepCopyInto(out *Artifacts) {
	*out = *in
	if in.Inputs != nil {
		in, out := &in.Inputs, &out.Inputs
		*out = make([]Artifact, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Outputs != nil {
		in, out := &in.Outputs, &out.Outputs
		*out = make([]Artifact, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifacts.
func (in *Artifacts) DeepCopy() *Artifacts {
	if in == nil {
		return nil
	}
	out := new(Artifacts)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChildStatusReference) DeepCopyInto(out *ChildStatusReference) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.WhenExpressions != nil {
		in, out := &in.WhenExpressions, &out.WhenExpressions
		*out = make([]WhenExpression, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChildStatusReference.
func (in *ChildStatusReference) DeepCopy() *ChildStatusReference {
	if in == nil {
		return nil
	}
	out := new(ChildStatusReference)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): generated by deepcopy-gen ("DO NOT EDIT"); regenerate rather
// than hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Combination) DeepCopyInto(out *Combination) {
	{
		in := &in
		*out = make(Combination, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combination.
func (in Combination) DeepCopy() Combination {
	if in == nil {
		return nil
	}
	out := new(Combination)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Combinations) DeepCopyInto(out *Combinations) {
	{
		in := &in
		*out = make(Combinations, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = make(Combination, len(*in))
				for key, val := range *in {
					(*out)[key] = val
				}
			}
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combinations.
func (in Combinations) DeepCopy() Combinations {
	if in == nil {
		return nil
	}
	out := new(Combinations)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmbeddedTask) DeepCopyInto(out *EmbeddedTask) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Spec.DeepCopyInto(&out.Spec)
	in.Metadata.DeepCopyInto(&out.Metadata)
	in.TaskSpec.DeepCopyInto(&out.TaskSpec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedTask.
func (in *EmbeddedTask) DeepCopy() *EmbeddedTask {
	if in == nil {
		return nil
	}
	out := new(EmbeddedTask)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IncludeParams) DeepCopyInto(out *IncludeParams) {
	*out = *in
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParams.
func (in *IncludeParams) DeepCopy() *IncludeParams {
	if in == nil {
		return nil
	}
	out := new(IncludeParams)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in IncludeParamsList) DeepCopyInto(out *IncludeParamsList) {
	{
		in := &in
		*out = make(IncludeParamsList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParamsList.
func (in IncludeParamsList) DeepCopy() IncludeParamsList {
	if in == nil {
		return nil
	}
	out := new(IncludeParamsList)
	in.DeepCopyInto(out)
	return *out
}
// NOTE(review): generated by deepcopy-gen ("DO NOT EDIT"); regenerate rather
// than hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Matrix) DeepCopyInto(out *Matrix) {
	*out = *in
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Include != nil {
		in, out := &in.Include, &out.Include
		*out = make(IncludeParamsList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Matrix.
func (in *Matrix) DeepCopy() *Matrix {
	if in == nil {
		return nil
	}
	out := new(Matrix)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Param) DeepCopyInto(out *Param) {
	*out = *in
	in.Value.DeepCopyInto(&out.Value)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Param.
func (in *Param) DeepCopy() *Param {
	if in == nil {
		return nil
	}
	out := new(Param)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParamSpec) DeepCopyInto(out *ParamSpec) {
	*out = *in
	if in.Properties != nil {
		in, out := &in.Properties, &out.Properties
		*out = make(map[string]PropertySpec, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Default != nil {
		in, out := &in.Default, &out.Default
		*out = new(ParamValue)
		(*in).DeepCopyInto(*out)
	}
	if in.Enum != nil {
		in, out := &in.Enum, &out.Enum
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpec.
func (in *ParamSpec) DeepCopy() *ParamSpec {
	if in == nil {
		return nil
	}
	out := new(ParamSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ParamSpecs) DeepCopyInto(out *ParamSpecs) {
	{
		in := &in
		*out = make(ParamSpecs, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpecs.
func (in ParamSpecs) DeepCopy() ParamSpecs {
	if in == nil {
		return nil
	}
	out := new(ParamSpecs)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParamValue) DeepCopyInto(out *ParamValue) {
	*out = *in
	if in.ArrayVal != nil {
		in, out := &in.ArrayVal, &out.ArrayVal
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.ObjectVal != nil {
		in, out := &in.ObjectVal, &out.ObjectVal
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamValue.
func (in *ParamValue) DeepCopy() *ParamValue {
	if in == nil {
		return nil
	}
	out := new(ParamValue)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): generated by deepcopy-gen ("DO NOT EDIT"); regenerate rather
// than hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Params) DeepCopyInto(out *Params) {
	{
		in := &in
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Params.
func (in Params) DeepCopy() Params {
	if in == nil {
		return nil
	}
	out := new(Params)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pipeline) DeepCopyInto(out *Pipeline) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline.
func (in *Pipeline) DeepCopy() *Pipeline {
	if in == nil {
		return nil
	}
	out := new(Pipeline)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Pipeline) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineList) DeepCopyInto(out *PipelineList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Pipeline, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList.
func (in *PipelineList) DeepCopy() *PipelineList {
	if in == nil {
		return nil
	}
	out := new(PipelineList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRef) DeepCopyInto(out *PipelineRef) {
	*out = *in
	in.ResolverRef.DeepCopyInto(&out.ResolverRef)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRef.
func (in *PipelineRef) DeepCopy() *PipelineRef {
	if in == nil {
		return nil
	}
	out := new(PipelineRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResult) DeepCopyInto(out *PipelineResult) {
	*out = *in
	in.Value.DeepCopyInto(&out.Value)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResult.
func (in *PipelineResult) DeepCopy() *PipelineResult {
	if in == nil {
		return nil
	}
	out := new(PipelineResult)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRun) DeepCopyInto(out *PipelineRun) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRun.
func (in *PipelineRun) DeepCopy() *PipelineRun {
if in == nil {
return nil
}
out := new(PipelineRun)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineRun) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRunList) DeepCopyInto(out *PipelineRunList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PipelineRun, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunList.
func (in *PipelineRunList) DeepCopy() *PipelineRunList {
if in == nil {
return nil
}
out := new(PipelineRunList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineRunList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineRunResult) DeepCopyInto(out *PipelineRunResult) {
*out = *in
in.Value.DeepCopyInto(&out.Value)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunResult.
func (in *PipelineRunResult) DeepCopy() *PipelineRunResult {
if in == nil {
return nil
}
out := new(PipelineRunResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineRunRunStatus) DeepCopyInto(out *PipelineRunRunStatus) {
*out = *in
if in.Status != nil {
// Shadowed in/out alias the Status pointer fields of the outer in/out.
in, out := &in.Status, &out.Status
*out = new(v1beta1.CustomRunStatus)
(*in).DeepCopyInto(*out)
}
if in.WhenExpressions != nil {
in, out := &in.WhenExpressions, &out.WhenExpressions
*out = make([]WhenExpression, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunRunStatus.
func (in *PipelineRunRunStatus) DeepCopy() *PipelineRunRunStatus {
if in == nil {
return nil
}
out := new(PipelineRunRunStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) {
*out = *in
if in.PipelineRef != nil {
// Each shadowed in/out pair below aliases one pointer/slice field of the outer in/out.
in, out := &in.PipelineRef, &out.PipelineRef
*out = new(PipelineRef)
(*in).DeepCopyInto(*out)
}
if in.PipelineSpec != nil {
in, out := &in.PipelineSpec, &out.PipelineSpec
*out = new(PipelineSpec)
(*in).DeepCopyInto(*out)
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make(Params, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Timeouts != nil {
in, out := &in.Timeouts, &out.Timeouts
*out = new(TimeoutFields)
(*in).DeepCopyInto(*out)
}
in.TaskRunTemplate.DeepCopyInto(&out.TaskRunTemplate)
if in.Workspaces != nil {
in, out := &in.Workspaces, &out.Workspaces
*out = make([]WorkspaceBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TaskRunSpecs != nil {
in, out := &in.TaskRunSpecs, &out.TaskRunSpecs
*out = make([]PipelineTaskRunSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ManagedBy != nil {
// Plain value behind the pointer: allocate and copy the string value.
in, out := &in.ManagedBy, &out.ManagedBy
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunSpec.
func (in *PipelineRunSpec) DeepCopy() *PipelineRunSpec {
if in == nil {
return nil
}
out := new(PipelineRunSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineRunStatus) DeepCopyInto(out *PipelineRunStatus) {
*out = *in
in.Status.DeepCopyInto(&out.Status)
in.PipelineRunStatusFields.DeepCopyInto(&out.PipelineRunStatusFields)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunStatus.
func (in *PipelineRunStatus) DeepCopy() *PipelineRunStatus {
if in == nil {
return nil
}
out := new(PipelineRunStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) {
*out = *in
if in.StartTime != nil {
// Each shadowed in/out pair below aliases one field of the outer in/out.
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]PipelineRunResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PipelineSpec != nil {
in, out := &in.PipelineSpec, &out.PipelineSpec
*out = new(PipelineSpec)
(*in).DeepCopyInto(*out)
}
if in.SkippedTasks != nil {
in, out := &in.SkippedTasks, &out.SkippedTasks
*out = make([]SkippedTask, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ChildReferences != nil {
in, out := &in.ChildReferences, &out.ChildReferences
*out = make([]ChildStatusReference, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FinallyStartTime != nil {
in, out := &in.FinallyStartTime, &out.FinallyStartTime
*out = (*in).DeepCopy()
}
if in.Provenance != nil {
in, out := &in.Provenance, &out.Provenance
*out = new(Provenance)
(*in).DeepCopyInto(*out)
}
if in.SpanContext != nil {
// map[string]string: keys and values are immutable strings, so assignment copies fully.
in, out := &in.SpanContext, &out.SpanContext
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunStatusFields.
func (in *PipelineRunStatusFields) DeepCopy() *PipelineRunStatusFields {
if in == nil {
return nil
}
out := new(PipelineRunStatusFields)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineRunTaskRunStatus) DeepCopyInto(out *PipelineRunTaskRunStatus) {
*out = *in
if in.Status != nil {
// Shadowed in/out alias the Status pointer fields of the outer in/out.
in, out := &in.Status, &out.Status
*out = new(TaskRunStatus)
(*in).DeepCopyInto(*out)
}
if in.WhenExpressions != nil {
in, out := &in.WhenExpressions, &out.WhenExpressions
*out = make([]WhenExpression, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunTaskRunStatus.
func (in *PipelineRunTaskRunStatus) DeepCopy() *PipelineRunTaskRunStatus {
if in == nil {
return nil
}
out := new(PipelineRunTaskRunStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
*out = *in
if in.Tasks != nil {
// Shadowed in/out alias the corresponding slice fields of the outer in/out.
in, out := &in.Tasks, &out.Tasks
*out = make([]PipelineTask, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make(ParamSpecs, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Workspaces != nil {
// copy() suffices here: the generator determined the element type has no reference fields.
in, out := &in.Workspaces, &out.Workspaces
*out = make([]PipelineWorkspaceDeclaration, len(*in))
copy(*out, *in)
}
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]PipelineResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Finally != nil {
in, out := &in.Finally, &out.Finally
*out = make([]PipelineTask, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
func (in *PipelineSpec) DeepCopy() *PipelineSpec {
if in == nil {
return nil
}
out := new(PipelineSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineTask) DeepCopyInto(out *PipelineTask) {
*out = *in
if in.TaskRef != nil {
// Each shadowed in/out pair below aliases one field of the outer in/out.
in, out := &in.TaskRef, &out.TaskRef
*out = new(TaskRef)
(*in).DeepCopyInto(*out)
}
if in.TaskSpec != nil {
in, out := &in.TaskSpec, &out.TaskSpec
*out = new(EmbeddedTask)
(*in).DeepCopyInto(*out)
}
if in.When != nil {
in, out := &in.When, &out.When
*out = make(WhenExpressions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RunAfter != nil {
in, out := &in.RunAfter, &out.RunAfter
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make(Params, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Matrix != nil {
in, out := &in.Matrix, &out.Matrix
*out = new(Matrix)
(*in).DeepCopyInto(*out)
}
if in.Workspaces != nil {
// copy() suffices: the generator determined the element type has no reference fields.
in, out := &in.Workspaces, &out.Workspaces
*out = make([]WorkspacePipelineTaskBinding, len(*in))
copy(*out, *in)
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.PipelineRef != nil {
in, out := &in.PipelineRef, &out.PipelineRef
*out = new(PipelineRef)
(*in).DeepCopyInto(*out)
}
if in.PipelineSpec != nil {
in, out := &in.PipelineSpec, &out.PipelineSpec
*out = new(PipelineSpec)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTask.
func (in *PipelineTask) DeepCopy() *PipelineTask {
if in == nil {
return nil
}
out := new(PipelineTask)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in PipelineTaskList) DeepCopyInto(out *PipelineTaskList) {
{
// Value receiver: shadow in with its address so the generator's slice-copy template applies.
in := &in
*out = make(PipelineTaskList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskList.
func (in PipelineTaskList) DeepCopy() PipelineTaskList {
if in == nil {
return nil
}
out := new(PipelineTaskList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineTaskMetadata) DeepCopyInto(out *PipelineTaskMetadata) {
*out = *in
if in.Labels != nil {
// map[string]string values are immutable strings; per-entry assignment is a full copy.
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskMetadata.
func (in *PipelineTaskMetadata) DeepCopy() *PipelineTaskMetadata {
if in == nil {
return nil
}
out := new(PipelineTaskMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen; shallow assignment suffices — the type has no reference fields.
func (in *PipelineTaskParam) DeepCopyInto(out *PipelineTaskParam) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskParam.
func (in *PipelineTaskParam) DeepCopy() *PipelineTaskParam {
if in == nil {
return nil
}
out := new(PipelineTaskParam)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen; shallow assignment suffices — the type has no reference fields.
func (in *PipelineTaskRun) DeepCopyInto(out *PipelineTaskRun) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRun.
func (in *PipelineTaskRun) DeepCopy() *PipelineTaskRun {
if in == nil {
return nil
}
out := new(PipelineTaskRun)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) {
*out = *in
if in.PodTemplate != nil {
// Each shadowed in/out pair below aliases one field of the outer in/out.
in, out := &in.PodTemplate, &out.PodTemplate
*out = new(pod.Template)
(*in).DeepCopyInto(*out)
}
if in.StepSpecs != nil {
in, out := &in.StepSpecs, &out.StepSpecs
*out = make([]TaskRunStepSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SidecarSpecs != nil {
in, out := &in.SidecarSpecs, &out.SidecarSpecs
*out = make([]TaskRunSidecarSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(PipelineTaskMetadata)
(*in).DeepCopyInto(*out)
}
if in.ComputeResources != nil {
in, out := &in.ComputeResources, &out.ComputeResources
*out = new(corev1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRunSpec.
func (in *PipelineTaskRunSpec) DeepCopy() *PipelineTaskRunSpec {
if in == nil {
return nil
}
out := new(PipelineTaskRunSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *PipelineTaskRunTemplate) DeepCopyInto(out *PipelineTaskRunTemplate) {
*out = *in
if in.PodTemplate != nil {
// Shadowed in/out alias the PodTemplate pointer fields of the outer in/out.
in, out := &in.PodTemplate, &out.PodTemplate
*out = new(pod.Template)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRunTemplate.
func (in *PipelineTaskRunTemplate) DeepCopy() *PipelineTaskRunTemplate {
if in == nil {
return nil
}
out := new(PipelineTaskRunTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen; shallow assignment suffices — the type has no reference fields.
func (in *PipelineWorkspaceDeclaration) DeepCopyInto(out *PipelineWorkspaceDeclaration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineWorkspaceDeclaration.
func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration {
if in == nil {
return nil
}
out := new(PipelineWorkspaceDeclaration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen; shallow assignment suffices — the type has no reference fields.
func (in *PropertySpec) DeepCopyInto(out *PropertySpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec.
func (in *PropertySpec) DeepCopy() *PropertySpec {
if in == nil {
return nil
}
out := new(PropertySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *Provenance) DeepCopyInto(out *Provenance) {
*out = *in
if in.RefSource != nil {
// Shadowed in/out alias the RefSource pointer fields of the outer in/out.
in, out := &in.RefSource, &out.RefSource
*out = new(RefSource)
(*in).DeepCopyInto(*out)
}
if in.FeatureFlags != nil {
// Value copy through the pointer: FeatureFlags is treated as a flat struct here.
in, out := &in.FeatureFlags, &out.FeatureFlags
*out = new(config.FeatureFlags)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provenance.
func (in *Provenance) DeepCopy() *Provenance {
if in == nil {
return nil
}
out := new(Provenance)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *Ref) DeepCopyInto(out *Ref) {
*out = *in
in.ResolverRef.DeepCopyInto(&out.ResolverRef)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ref.
func (in *Ref) DeepCopy() *Ref {
if in == nil {
return nil
}
out := new(Ref)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *RefSource) DeepCopyInto(out *RefSource) {
*out = *in
if in.Digest != nil {
// map[string]string values are immutable strings; per-entry assignment is a full copy.
in, out := &in.Digest, &out.Digest
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefSource.
func (in *RefSource) DeepCopy() *RefSource {
if in == nil {
return nil
}
out := new(RefSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *ResolverRef) DeepCopyInto(out *ResolverRef) {
*out = *in
if in.Params != nil {
// Shadowed in/out alias the Params slice fields; each element is deep-copied.
in, out := &in.Params, &out.Params
*out = make(Params, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRef.
func (in *ResolverRef) DeepCopy() *ResolverRef {
if in == nil {
return nil
}
out := new(ResolverRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *ResultRef) DeepCopyInto(out *ResultRef) {
*out = *in
if in.ResultsIndex != nil {
// *int: allocate a fresh int and copy the value so the pointers do not alias.
in, out := &in.ResultsIndex, &out.ResultsIndex
*out = new(int)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultRef.
func (in *ResultRef) DeepCopy() *ResultRef {
if in == nil {
return nil
}
out := new(ResultRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in RetriesStatus) DeepCopyInto(out *RetriesStatus) {
{
// Value receiver: shadow in with its address so the generator's slice-copy template applies.
in := &in
*out = make(RetriesStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetriesStatus.
func (in RetriesStatus) DeepCopy() RetriesStatus {
if in == nil {
return nil
}
out := new(RetriesStatus)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *Sidecar) DeepCopyInto(out *Sidecar) {
*out = *in
if in.Command != nil {
// Each shadowed in/out pair below aliases one field of the outer in/out.
// copy() is used where the element type has no reference fields; otherwise per-element DeepCopyInto.
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]corev1.ContainerPort, len(*in))
copy(*out, *in)
}
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]corev1.EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.ComputeResources.DeepCopyInto(&out.ComputeResources)
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]corev1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeDevices != nil {
in, out := &in.VolumeDevices, &out.VolumeDevices
*out = make([]corev1.VolumeDevice, len(*in))
copy(*out, *in)
}
if in.LivenessProbe != nil {
in, out := &in.LivenessProbe, &out.LivenessProbe
*out = new(corev1.Probe)
(*in).DeepCopyInto(*out)
}
if in.ReadinessProbe != nil {
in, out := &in.ReadinessProbe, &out.ReadinessProbe
*out = new(corev1.Probe)
(*in).DeepCopyInto(*out)
}
if in.StartupProbe != nil {
in, out := &in.StartupProbe, &out.StartupProbe
*out = new(corev1.Probe)
(*in).DeepCopyInto(*out)
}
if in.Lifecycle != nil {
in, out := &in.Lifecycle, &out.Lifecycle
*out = new(corev1.Lifecycle)
(*in).DeepCopyInto(*out)
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
if in.Workspaces != nil {
in, out := &in.Workspaces, &out.Workspaces
*out = make([]WorkspaceUsage, len(*in))
copy(*out, *in)
}
if in.RestartPolicy != nil {
in, out := &in.RestartPolicy, &out.RestartPolicy
*out = new(corev1.ContainerRestartPolicy)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar.
func (in *Sidecar) DeepCopy() *Sidecar {
if in == nil {
return nil
}
out := new(Sidecar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in SidecarList) DeepCopyInto(out *SidecarList) {
{
// Value receiver: shadow in with its address so the generator's slice-copy template applies.
in := &in
*out = make(SidecarList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarList.
func (in SidecarList) DeepCopy() SidecarList {
if in == nil {
return nil
}
out := new(SidecarList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *SidecarState) DeepCopyInto(out *SidecarState) {
*out = *in
in.ContainerState.DeepCopyInto(&out.ContainerState)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarState.
func (in *SidecarState) DeepCopy() *SidecarState {
if in == nil {
return nil
}
out := new(SidecarState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *SkippedTask) DeepCopyInto(out *SkippedTask) {
*out = *in
if in.WhenExpressions != nil {
// Shadowed in/out alias the WhenExpressions slice fields; each element is deep-copied.
in, out := &in.WhenExpressions, &out.WhenExpressions
*out = make([]WhenExpression, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkippedTask.
func (in *SkippedTask) DeepCopy() *SkippedTask {
if in == nil {
return nil
}
out := new(SkippedTask)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *Step) DeepCopyInto(out *Step) {
*out = *in
if in.Command != nil {
// Each shadowed in/out pair below aliases one field of the outer in/out.
// copy() is used where the element type has no reference fields; otherwise per-element DeepCopyInto.
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]corev1.EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.ComputeResources.DeepCopyInto(&out.ComputeResources)
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]corev1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeDevices != nil {
in, out := &in.VolumeDevices, &out.VolumeDevices
*out = make([]corev1.VolumeDevice, len(*in))
copy(*out, *in)
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Workspaces != nil {
in, out := &in.Workspaces, &out.Workspaces
*out = make([]WorkspaceUsage, len(*in))
copy(*out, *in)
}
if in.StdoutConfig != nil {
in, out := &in.StdoutConfig, &out.StdoutConfig
*out = new(StepOutputConfig)
**out = **in
}
if in.StderrConfig != nil {
in, out := &in.StderrConfig, &out.StderrConfig
*out = new(StepOutputConfig)
**out = **in
}
if in.Ref != nil {
in, out := &in.Ref, &out.Ref
*out = new(Ref)
(*in).DeepCopyInto(*out)
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make(Params, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]StepResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.When != nil {
in, out := &in.When, &out.When
*out = make(WhenExpressions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step.
func (in *Step) DeepCopy() *Step {
if in == nil {
return nil
}
out := new(Step)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in StepList) DeepCopyInto(out *StepList) {
{
// Value receiver: shadow in with its address so the generator's slice-copy template applies.
in := &in
*out = make(StepList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepList.
func (in StepList) DeepCopy() StepList {
if in == nil {
return nil
}
out := new(StepList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen; shallow assignment suffices — the type has no reference fields.
func (in *StepOutputConfig) DeepCopyInto(out *StepOutputConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepOutputConfig.
func (in *StepOutputConfig) DeepCopy() *StepOutputConfig {
if in == nil {
return nil
}
out := new(StepOutputConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *StepResult) DeepCopyInto(out *StepResult) {
*out = *in
if in.Properties != nil {
// Map values are copied by assignment; the generator treats PropertySpec as a flat value type here.
in, out := &in.Properties, &out.Properties
*out = make(map[string]PropertySpec, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResult.
func (in *StepResult) DeepCopy() *StepResult {
if in == nil {
return nil
}
out := new(StepResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *StepState) DeepCopyInto(out *StepState) {
*out = *in
in.ContainerState.DeepCopyInto(&out.ContainerState)
if in.Results != nil {
// Each shadowed in/out pair below aliases one field of the outer in/out.
in, out := &in.Results, &out.Results
*out = make([]TaskRunResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Provenance != nil {
in, out := &in.Provenance, &out.Provenance
*out = new(Provenance)
(*in).DeepCopyInto(*out)
}
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]Artifact, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Outputs != nil {
in, out := &in.Outputs, &out.Outputs
*out = make([]Artifact, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepState.
func (in *StepState) DeepCopy() *StepState {
if in == nil {
return nil
}
out := new(StepState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *StepTemplate) DeepCopyInto(out *StepTemplate) {
*out = *in
if in.Command != nil {
// Each shadowed in/out pair below aliases one field of the outer in/out.
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]corev1.EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.ComputeResources.DeepCopyInto(&out.ComputeResources)
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]corev1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeDevices != nil {
in, out := &in.VolumeDevices, &out.VolumeDevices
*out = make([]corev1.VolumeDevice, len(*in))
copy(*out, *in)
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepTemplate.
func (in *StepTemplate) DeepCopy() *StepTemplate {
if in == nil {
return nil
}
out := new(StepTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *Task) DeepCopyInto(out *Task) {
*out = *in
// TypeMeta is pointer-free, so a shallow assignment is a full copy.
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task.
func (in *Task) DeepCopy() *Task {
if in == nil {
return nil
}
out := new(Task)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Task) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *TaskBreakpoints) DeepCopyInto(out *TaskBreakpoints) {
*out = *in
if in.BeforeSteps != nil {
// []string: copy() suffices since strings are immutable.
in, out := &in.BeforeSteps, &out.BeforeSteps
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskBreakpoints.
func (in *TaskBreakpoints) DeepCopy() *TaskBreakpoints {
if in == nil {
return nil
}
out := new(TaskBreakpoints)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *TaskList) DeepCopyInto(out *TaskList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
// Shadowed in/out alias the Items slice fields; each element is deep-copied.
in, out := &in.Items, &out.Items
*out = make([]Task, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList.
func (in *TaskList) DeepCopy() *TaskList {
if in == nil {
return nil
}
out := new(TaskList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TaskList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *TaskRef) DeepCopyInto(out *TaskRef) {
*out = *in
in.ResolverRef.DeepCopyInto(&out.ResolverRef)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRef.
func (in *TaskRef) DeepCopy() *TaskRef {
if in == nil {
return nil
}
out := new(TaskRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *TaskResult) DeepCopyInto(out *TaskResult) {
*out = *in
if in.Properties != nil {
// Map values are copied by assignment; the generator treats PropertySpec as a flat value type here.
in, out := &in.Properties, &out.Properties
*out = make(map[string]PropertySpec, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Value != nil {
in, out := &in.Value, &out.Value
*out = new(ParamValue)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResult.
func (in *TaskResult) DeepCopy() *TaskResult {
if in == nil {
return nil
}
out := new(TaskResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *TaskRun) DeepCopyInto(out *TaskRun) {
*out = *in
// TypeMeta is pointer-free, so a shallow assignment is a full copy.
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRun.
func (in *TaskRun) DeepCopy() *TaskRun {
if in == nil {
return nil
}
out := new(TaskRun)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TaskRun) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *TaskRunDebug) DeepCopyInto(out *TaskRunDebug) {
*out = *in
if in.Breakpoints != nil {
// Shadowed in/out alias the Breakpoints pointer fields of the outer in/out.
in, out := &in.Breakpoints, &out.Breakpoints
*out = new(TaskBreakpoints)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunDebug.
func (in *TaskRunDebug) DeepCopy() *TaskRunDebug {
if in == nil {
return nil
}
out := new(TaskRunDebug)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate from the API types.
func (in *TaskRunInputs) DeepCopyInto(out *TaskRunInputs) {
*out = *in
if in.Params != nil {
// Shadowed in/out alias the Params slice fields; each element is deep-copied.
in, out := &in.Params, &out.Params
*out = make(Params, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunInputs.
func (in *TaskRunInputs) DeepCopy() *TaskRunInputs {
if in == nil {
return nil
}
out := new(TaskRunInputs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunList) DeepCopyInto(out *TaskRunList) {
	*out = *in
	// TypeMeta contains only value fields, so plain assignment is a full copy.
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items: allocate a fresh slice and deep-copy each TaskRun in place.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]TaskRun, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunList.
func (in *TaskRunList) DeepCopy() *TaskRunList {
	if in == nil {
		return nil
	}
	out := new(TaskRunList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TaskRunList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunResult) DeepCopyInto(out *TaskRunResult) {
	*out = *in
	// Value is an embedded value type with reference fields of its own;
	// delegate to its DeepCopyInto to copy them.
	in.Value.DeepCopyInto(&out.Value)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunResult.
func (in *TaskRunResult) DeepCopy() *TaskRunResult {
	if in == nil {
		return nil
	}
	out := new(TaskRunResult)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunSidecarSpec) DeepCopyInto(out *TaskRunSidecarSpec) {
	*out = *in
	// ComputeResources (corev1.ResourceRequirements) holds maps; deep-copy them.
	in.ComputeResources.DeepCopyInto(&out.ComputeResources)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunSidecarSpec.
func (in *TaskRunSidecarSpec) DeepCopy() *TaskRunSidecarSpec {
	if in == nil {
		return nil
	}
	out := new(TaskRunSidecarSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) {
	*out = *in
	// Pointer fields: allocate a fresh target, then deep-copy (or dereference-assign
	// for types the generator determined have no reference fields, e.g. metav1.Duration).
	if in.Debug != nil {
		in, out := &in.Debug, &out.Debug
		*out = new(TaskRunDebug)
		(*in).DeepCopyInto(*out)
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.TaskRef != nil {
		in, out := &in.TaskRef, &out.TaskRef
		*out = new(TaskRef)
		(*in).DeepCopyInto(*out)
	}
	if in.TaskSpec != nil {
		in, out := &in.TaskSpec, &out.TaskSpec
		*out = new(TaskSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.PodTemplate != nil {
		in, out := &in.PodTemplate, &out.PodTemplate
		*out = new(pod.Template)
		(*in).DeepCopyInto(*out)
	}
	// Slice fields: allocate a fresh slice and deep-copy each element in place.
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]WorkspaceBinding, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StepSpecs != nil {
		in, out := &in.StepSpecs, &out.StepSpecs
		*out = make([]TaskRunStepSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SidecarSpecs != nil {
		in, out := &in.SidecarSpecs, &out.SidecarSpecs
		*out = make([]TaskRunSidecarSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ComputeResources != nil {
		in, out := &in.ComputeResources, &out.ComputeResources
		*out = new(corev1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	if in.ManagedBy != nil {
		in, out := &in.ManagedBy, &out.ManagedBy
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunSpec.
func (in *TaskRunSpec) DeepCopy() *TaskRunSpec {
	if in == nil {
		return nil
	}
	out := new(TaskRunSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunStatus) DeepCopyInto(out *TaskRunStatus) {
	*out = *in
	// Both embedded structs carry reference fields; delegate to their DeepCopyInto.
	in.Status.DeepCopyInto(&out.Status)
	in.TaskRunStatusFields.DeepCopyInto(&out.TaskRunStatusFields)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStatus.
func (in *TaskRunStatus) DeepCopy() *TaskRunStatus {
	if in == nil {
		return nil
	}
	out := new(TaskRunStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) {
	*out = *in
	// Timestamps: metav1.Time pointers expose a value-returning DeepCopy,
	// so the generator assigns its result directly.
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = (*in).DeepCopy()
	}
	if in.CompletionTime != nil {
		in, out := &in.CompletionTime, &out.CompletionTime
		*out = (*in).DeepCopy()
	}
	// Slice fields: allocate a fresh slice and deep-copy each element in place.
	if in.Steps != nil {
		in, out := &in.Steps, &out.Steps
		*out = make([]StepState, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RetriesStatus != nil {
		in, out := &in.RetriesStatus, &out.RetriesStatus
		*out = make(RetriesStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Results != nil {
		in, out := &in.Results, &out.Results
		*out = make([]TaskRunResult, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Artifacts != nil {
		in, out := &in.Artifacts, &out.Artifacts
		*out = new(Artifacts)
		(*in).DeepCopyInto(*out)
	}
	if in.Sidecars != nil {
		in, out := &in.Sidecars, &out.Sidecars
		*out = make([]SidecarState, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.TaskSpec != nil {
		in, out := &in.TaskSpec, &out.TaskSpec
		*out = new(TaskSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Provenance != nil {
		in, out := &in.Provenance, &out.Provenance
		*out = new(Provenance)
		(*in).DeepCopyInto(*out)
	}
	// SpanContext: map of strings; a fresh map with assigned entries is a full copy.
	if in.SpanContext != nil {
		in, out := &in.SpanContext, &out.SpanContext
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStatusFields.
func (in *TaskRunStatusFields) DeepCopy() *TaskRunStatusFields {
	if in == nil {
		return nil
	}
	out := new(TaskRunStatusFields)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunStepSpec) DeepCopyInto(out *TaskRunStepSpec) {
	*out = *in
	// ComputeResources (corev1.ResourceRequirements) holds maps; deep-copy them.
	in.ComputeResources.DeepCopyInto(&out.ComputeResources)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStepSpec.
func (in *TaskRunStepSpec) DeepCopy() *TaskRunStepSpec {
	if in == nil {
		return nil
	}
	out := new(TaskRunStepSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskSpec) DeepCopyInto(out *TaskSpec) {
	*out = *in
	// Slice fields with reference-bearing elements: allocate a fresh slice and
	// deep-copy each element in place.
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(ParamSpecs, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Steps != nil {
		in, out := &in.Steps, &out.Steps
		*out = make([]Step, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make(Volumes, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StepTemplate != nil {
		in, out := &in.StepTemplate, &out.StepTemplate
		*out = new(StepTemplate)
		(*in).DeepCopyInto(*out)
	}
	if in.Sidecars != nil {
		in, out := &in.Sidecars, &out.Sidecars
		*out = make([]Sidecar, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	// Workspaces: elements are plain values per the generator, so copy suffices.
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]WorkspaceDeclaration, len(*in))
		copy(*out, *in)
	}
	if in.Results != nil {
		in, out := &in.Results, &out.Results
		*out = make([]TaskResult, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec.
func (in *TaskSpec) DeepCopy() *TaskSpec {
	if in == nil {
		return nil
	}
	out := new(TaskSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TimeoutFields) DeepCopyInto(out *TimeoutFields) {
	*out = *in
	// Each field is a *metav1.Duration (no reference fields); allocate and
	// dereference-assign to copy.
	if in.Pipeline != nil {
		in, out := &in.Pipeline, &out.Pipeline
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.Tasks != nil {
		in, out := &in.Tasks, &out.Tasks
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.Finally != nil {
		in, out := &in.Finally, &out.Finally
		*out = new(metav1.Duration)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutFields.
func (in *TimeoutFields) DeepCopy() *TimeoutFields {
	if in == nil {
		return nil
	}
	out := new(TimeoutFields)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Volumes) DeepCopyInto(out *Volumes) {
	{
		// Value receiver: take the address of the local copy so the element
		// loop below can use the same *in / *out pattern as pointer receivers.
		in := &in
		*out = make(Volumes, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volumes.
func (in Volumes) DeepCopy() Volumes {
	if in == nil {
		return nil
	}
	out := new(Volumes)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WhenExpression) DeepCopyInto(out *WhenExpression) {
	*out = *in
	// Values: slice of strings; a fresh slice plus copy is a full deep copy.
	if in.Values != nil {
		in, out := &in.Values, &out.Values
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WhenExpression.
func (in *WhenExpression) DeepCopy() *WhenExpression {
	if in == nil {
		return nil
	}
	out := new(WhenExpression)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in WhenExpressions) DeepCopyInto(out *WhenExpressions) {
	{
		// Value receiver: take the address of the local copy so the element
		// loop below can use the same *in / *out pattern as pointer receivers.
		in := &in
		*out = make(WhenExpressions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WhenExpressions.
func (in WhenExpressions) DeepCopy() WhenExpressions {
	if in == nil {
		return nil
	}
	out := new(WhenExpressions)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) {
	*out = *in
	// Each volume-source field is an optional pointer; allocate a fresh target
	// and deep-copy (or dereference-assign when the corev1 type has no
	// reference fields, e.g. PersistentVolumeClaimVolumeSource).
	if in.VolumeClaimTemplate != nil {
		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
		*out = new(corev1.PersistentVolumeClaim)
		(*in).DeepCopyInto(*out)
	}
	if in.PersistentVolumeClaim != nil {
		in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
		*out = new(corev1.PersistentVolumeClaimVolumeSource)
		**out = **in
	}
	if in.EmptyDir != nil {
		in, out := &in.EmptyDir, &out.EmptyDir
		*out = new(corev1.EmptyDirVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.ConfigMap != nil {
		in, out := &in.ConfigMap, &out.ConfigMap
		*out = new(corev1.ConfigMapVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.Secret != nil {
		in, out := &in.Secret, &out.Secret
		*out = new(corev1.SecretVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.Projected != nil {
		in, out := &in.Projected, &out.Projected
		*out = new(corev1.ProjectedVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.CSI != nil {
		in, out := &in.CSI, &out.CSI
		*out = new(corev1.CSIVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceBinding.
func (in *WorkspaceBinding) DeepCopy() *WorkspaceBinding {
	if in == nil {
		return nil
	}
	out := new(WorkspaceBinding)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspaceDeclaration) DeepCopyInto(out *WorkspaceDeclaration) {
	// Plain struct assignment suffices: the generator found no reference fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceDeclaration.
func (in *WorkspaceDeclaration) DeepCopy() *WorkspaceDeclaration {
	if in == nil {
		return nil
	}
	out := new(WorkspaceDeclaration)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePipelineTaskBinding) DeepCopyInto(out *WorkspacePipelineTaskBinding) {
	// Plain struct assignment suffices: the generator found no reference fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePipelineTaskBinding.
func (in *WorkspacePipelineTaskBinding) DeepCopy() *WorkspacePipelineTaskBinding {
	if in == nil {
		return nil
	}
	out := new(WorkspacePipelineTaskBinding)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspaceUsage) DeepCopyInto(out *WorkspaceUsage) {
	// Plain struct assignment suffices: the generator found no reference fields.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceUsage.
func (in *WorkspaceUsage) DeepCopy() *WorkspaceUsage {
	if in == nil {
		return nil
	}
	out := new(WorkspaceUsage)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by openapi-gen. DO NOT EDIT.
package v1alpha1
import (
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
// GetOpenAPIDefinitions returns the OpenAPI schema definitions for the
// v1alpha1 API types (and the shared pod template types), keyed by Go
// package path. ref resolves cross-type references into OpenAPI $refs.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template":                  schema_pkg_apis_pipeline_pod_Template(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.Authority":            schema_pkg_apis_pipeline_v1alpha1_Authority(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.EmbeddedRunSpec":      schema_pkg_apis_pipeline_v1alpha1_EmbeddedRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.KeyRef":               schema_pkg_apis_pipeline_v1alpha1_KeyRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.ResourcePattern":      schema_pkg_apis_pipeline_v1alpha1_ResourcePattern(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.Run":                  schema_pkg_apis_pipeline_v1alpha1_Run(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.RunList":              schema_pkg_apis_pipeline_v1alpha1_RunList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.RunSpec":              schema_pkg_apis_pipeline_v1alpha1_RunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.StepAction":           schema_pkg_apis_pipeline_v1alpha1_StepAction(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.StepActionList":       schema_pkg_apis_pipeline_v1alpha1_StepActionList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.StepActionSpec":       schema_pkg_apis_pipeline_v1alpha1_StepActionSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.VerificationPolicy":   schema_pkg_apis_pipeline_v1alpha1_VerificationPolicy(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.VerificationPolicyList": schema_pkg_apis_pipeline_v1alpha1_VerificationPolicyList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.VerificationPolicySpec": schema_pkg_apis_pipeline_v1alpha1_VerificationPolicySpec(ref),
	}
}
// schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate builds the OpenAPI
// schema for pod.AffinityAssistantTemplate. Dependencies lists the types
// referenced via ref() so callers can resolve them transitively.
func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "AffinityAssistantTemplate holds pod specific configuration and is a subset of the generic pod Template",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"nodeSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"tolerations": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's tolerations.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Toleration"),
									},
								},
							},
						},
					},
					"imagePullSecrets": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.LocalObjectReference"),
									},
								},
							},
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext sets the security context for the pod",
							Ref:         ref("k8s.io/api/core/v1.PodSecurityContext"),
						},
					},
					"priorityClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
	}
}
// schema_pkg_apis_pipeline_pod_Template builds the OpenAPI schema for
// pod.Template, the pod-level configuration embeddable in Tekton specs.
// Dependencies lists the types referenced via ref() for transitive resolution.
func schema_pkg_apis_pipeline_pod_Template(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Template holds pod specific configuration",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"nodeSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables that can be provided to the containers belonging to the pod.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"tolerations": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's tolerations.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Toleration"),
									},
								},
							},
						},
					},
					"affinity": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's scheduling constraints. See Pod.spec.affinity (API version: v1)",
							Ref:         ref("k8s.io/api/core/v1.Affinity"),
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. See Pod.spec.securityContext (API version: v1)",
							Ref:         ref("k8s.io/api/core/v1.PodSecurityContext"),
						},
					},
					"volumes": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge,retainKeys",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes See Pod.spec.volumes (API version: v1)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Volume"),
									},
								},
							},
						},
					},
					"runtimeClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"automountServiceAccountToken": {
						SchemaProps: spec.SchemaProps{
							Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"dnsPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dnsConfig": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
							Ref:         ref("k8s.io/api/core/v1.PodDNSConfig"),
						},
					},
					"enableServiceLinks": {
						SchemaProps: spec.SchemaProps{
							Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"priorityClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"schedulerName": {
						SchemaProps: spec.SchemaProps{
							Description: "SchedulerName specifies the scheduler to be used to dispatch the Pod",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"imagePullSecrets": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.LocalObjectReference"),
									},
								},
							},
						},
					},
					"hostAliases": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.HostAlias"),
									},
								},
							},
						},
					},
					"hostNetwork": {
						SchemaProps: spec.SchemaProps{
							Description: "HostNetwork specifies whether the pod may use the node network namespace",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"topologySpreadConstraints": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "TopologySpreadConstraints controls how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume"},
	}
}
// schema_pkg_apis_pipeline_v1alpha1_Authority builds the OpenAPI schema for
// v1alpha1.Authority ("name" required; "key" references KeyRef).
func schema_pkg_apis_pipeline_v1alpha1_Authority(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "The Authority block defines the keys for validating signatures.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name for this authority.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"key": {
						SchemaProps: spec.SchemaProps{
							Description: "Key contains the public key to validate the resource.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.KeyRef"),
						},
					},
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.KeyRef"},
	}
}
// schema_pkg_apis_pipeline_v1alpha1_EmbeddedRunSpec builds the OpenAPI schema
// for v1alpha1.EmbeddedRunSpec; "spec" is an opaque RawExtension holding the
// custom task definition.
func schema_pkg_apis_pipeline_v1alpha1_EmbeddedRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "EmbeddedRunSpec allows custom task definitions to be embedded",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "Spec is a specification of a custom task",
							Ref:         ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
	}
}
// schema_pkg_apis_pipeline_v1alpha1_KeyRef builds the OpenAPI schema for
// v1alpha1.KeyRef, the reference to a verification public key (secret ref,
// inline data, or KMS URL).
func schema_pkg_apis_pipeline_v1alpha1_KeyRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "KeyRef defines the reference to a public key",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"secretRef": {
						SchemaProps: spec.SchemaProps{
							Description: "SecretRef sets a reference to a secret with the key.",
							Ref:         ref("k8s.io/api/core/v1.SecretReference"),
						},
					},
					"data": {
						SchemaProps: spec.SchemaProps{
							Description: "Data contains the inline public key.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"kms": {
						SchemaProps: spec.SchemaProps{
							Description: "KMS contains the KMS url of the public key Supported formats differ based on the KMS system used. One example of a KMS url could be: gcpkms://projects/[PROJECT]/locations/[LOCATION]>/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[KEY_VERSION] For more examples please refer https://docs.sigstore.dev/cosign/kms_support. Note that the KMS is not supported yet.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"hashAlgorithm": {
						SchemaProps: spec.SchemaProps{
							Description: "HashAlgorithm always defaults to sha256 if the algorithm hasn't been explicitly set",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.SecretReference"},
	}
}
// schema_pkg_apis_pipeline_v1alpha1_ResourcePattern builds the OpenAPI schema
// for v1alpha1.ResourcePattern. It references no other types, so there is no
// Dependencies list.
func schema_pkg_apis_pipeline_v1alpha1_ResourcePattern(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResourcePattern defines the pattern of the resource source",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"pattern": {
						SchemaProps: spec.SchemaProps{
							Description: "Pattern defines a resource pattern. Regex is created to filter resources based on `Pattern` Example patterns: GitHub resource: https://github.com/tektoncd/catalog.git, https://github.com/tektoncd/* Bundle resource: gcr.io/tekton-releases/catalog/upstream/git-clone, gcr.io/tekton-releases/catalog/upstream/* Hub resource: https://artifacthub.io/*,",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"pattern"},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1alpha1_Run builds the OpenAPI schema for the
// v1alpha1.Run top-level resource (standard kind/apiVersion/metadata plus
// spec and status references).
func schema_pkg_apis_pipeline_v1alpha1_Run(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Run represents a single execution of a Custom Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.RunSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1.RunStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.RunSpec", "github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1.RunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_pipeline_v1alpha1_RunList builds the OpenAPI definition for the v1alpha1 RunList type.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_RunList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RunList contains a list of Run",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.Run"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.Run", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1alpha1_RunSpec builds the OpenAPI definition for the v1alpha1 RunSpec type.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_RunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RunSpec defines the desired state of Run",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ref": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.EmbeddedRunSpec"),
},
},
"params": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
},
},
},
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Used for cancelling a run (and maybe more later on)",
Type: []string{"string"},
Format: "",
},
},
"statusMessage": {
SchemaProps: spec.SchemaProps{
Description: "Status message for cancellation.",
Type: []string{"string"},
Format: "",
},
},
"retries": {
SchemaProps: spec.SchemaProps{
Description: "Used for propagating retries count to custom tasks",
Type: []string{"integer"},
Format: "int32",
},
},
// serviceAccountName lacks omitempty on the Go field, so the generator emits a "" default.
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"podTemplate": {
SchemaProps: spec.SchemaProps{
Description: "PodTemplate holds pod specific configuration",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"),
},
},
"timeout": {
SchemaProps: spec.SchemaProps{
Description: "Time after which the custom-task times out. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
},
},
"workspaces": {
SchemaProps: spec.SchemaProps{
Description: "Workspaces is a list of WorkspaceBindings from volumes to workspaces.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.EmbeddedRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
}
}
// schema_pkg_apis_pipeline_v1alpha1_StepAction builds the OpenAPI definition for the v1alpha1 StepAction type.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_StepAction(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StepAction represents the actionable components of Step. The Step can only reference it from the cluster or using remote resolution.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the Step from the client",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.StepActionSpec"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.StepActionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1alpha1_StepActionList builds the OpenAPI definition for the v1alpha1 StepActionList type.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_StepActionList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StepActionList contains a list of StepActions",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.StepAction"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.StepAction", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1alpha1_StepActionSpec builds the OpenAPI definition for the v1alpha1 StepActionSpec type.
// Several list fields carry x-kubernetes-* vendor extensions mirroring the +listType/+patchMergeKey
// markers on the Go fields.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_StepActionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StepActionSpec contains the actionable components of a step.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the stepaction that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "Image reference name to run for this StepAction. More info: https://kubernetes.io/docs/concepts/containers/images",
Type: []string{"string"},
Format: "",
},
},
"command": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
"args": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
"env": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of environment variables to set in the container. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/core/v1.EnvVar"),
},
},
},
},
},
"script": {
SchemaProps: spec.SchemaProps{
Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.",
Type: []string{"string"},
Format: "",
},
},
"workingDir": {
SchemaProps: spec.SchemaProps{
Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
// params and results reference v1 (not v1alpha1) param/result types.
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params is a list of input parameters required to run the stepAction. Params must be supplied as inputs in Steps unless they declare a defaultvalue.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"),
},
},
},
},
},
"results": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Results are values that this StepAction can output",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult"),
},
},
},
},
},
"securityContext": {
SchemaProps: spec.SchemaProps{
Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ The value set in StepAction will take precedence over the value from Task.",
Ref: ref("k8s.io/api/core/v1.SecurityContext"),
},
},
"volumeMounts": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Volumes to mount into the Step's filesystem. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/core/v1.VolumeMount"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"},
}
}
// schema_pkg_apis_pipeline_v1alpha1_VerificationPolicy builds the OpenAPI definition for the v1alpha1 VerificationPolicy type.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_VerificationPolicy(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "VerificationPolicy defines the rules to verify Tekton resources. VerificationPolicy can config the mapping from resources to a list of public keys, so when verifying the resources we can use the corresponding public keys.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the VerificationPolicy.",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.VerificationPolicySpec"),
},
},
},
// Unlike Run/StepAction, spec is required here.
Required: []string{"spec"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.VerificationPolicySpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1alpha1_VerificationPolicyList builds the OpenAPI definition for the v1alpha1 VerificationPolicyList type.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_VerificationPolicyList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "VerificationPolicyList contains a list of VerificationPolicy",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.VerificationPolicy"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.VerificationPolicy", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1alpha1_VerificationPolicySpec builds the OpenAPI definition for the v1alpha1 VerificationPolicySpec type.
// NOTE(review): generated by openapi-gen from the Go type comments; regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1alpha1_VerificationPolicySpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "VerificationPolicySpec defines the patterns and authorities.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Resources defines the patterns of resources sources that should be subject to this policy. For example, we may want to apply this Policy from a certain GitHub repo. Then the ResourcesPattern should be valid regex. E.g. If using gitresolver, and we want to config keys from a certain git repo. `ResourcesPattern` can be `https://github.com/tektoncd/catalog.git`, we will use regex to filter out those resources.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.ResourcePattern"),
},
},
},
},
},
"authorities": {
SchemaProps: spec.SchemaProps{
Description: "Authorities defines the rules for validating signatures.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.Authority"),
},
},
},
},
},
"mode": {
SchemaProps: spec.SchemaProps{
Description: "Mode controls whether a failing policy will fail the taskrun/pipelinerun, or only log the warnings enforce - fail the taskrun/pipelinerun if verification fails (default) warn - don't fail the taskrun/pipelinerun if verification fails but log warnings",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"resources", "authorities"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.Authority", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1.ResourcePattern"},
}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects
// (group tekton.dev from the pipeline package, version v1alpha1).
var SchemeGroupVersion = schema.GroupVersion{Group: pipeline.GroupName, Version: "v1alpha1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
// using this package's SchemeGroupVersion.
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
// using this package's SchemeGroupVersion.
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// schemeBuilder collects the functions that register this package's types.
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme adds Build types to the scheme.
AddToScheme = schemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to Scheme: the Run,
// VerificationPolicy and StepAction kinds plus their list types, and the
// shared meta/v1 types for this group version.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Run{},
&RunList{},
&VerificationPolicy{},
&VerificationPolicyList{},
&StepAction{},
&StepActionList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"knative.dev/pkg/apis"
)
// Run participates in webhook defaulting.
var _ apis.Defaultable = (*Run)(nil)
// SetDefaults implements apis.Defaultable. It records the Run's metadata as
// the parent in the context, then defaults the spec.
func (r *Run) SetDefaults(ctx context.Context) {
ctx = apis.WithinParent(ctx, r.ObjectMeta)
r.Spec.SetDefaults(apis.WithinSpec(ctx))
}
// SetDefaults implements apis.Defaultable. It fills in cluster-configured
// defaults (service account and pod template) for any fields the user left
// unset, reading the defaults from the config stored in ctx.
func (rs *RunSpec) SetDefaults(ctx context.Context) {
	defaults := config.FromContextOrDefaults(ctx).Defaults
	// Only apply the default service account when the user set none and a
	// non-empty default is configured.
	if sa := defaults.DefaultServiceAccount; rs.ServiceAccountName == "" && sa != "" {
		rs.ServiceAccountName = sa
	}
	// An unset pod template picks up the configured default (which may
	// itself be nil, leaving the field unset).
	if rs.PodTemplate == nil {
		rs.PodTemplate = defaults.DefaultPodTemplate
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"time"
apisconfig "github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
runv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// EmbeddedRunSpec allows custom task definitions to be embedded
// directly in a Run instead of referenced by name.
type EmbeddedRunSpec struct {
// TypeMeta identifies the custom task's apiVersion and kind.
runtime.TypeMeta `json:",inline"`
// Metadata carries labels/annotations for the embedded task.
// +optional
Metadata v1beta1.PipelineTaskMetadata `json:"metadata,omitempty"`
// Spec is a specification of a custom task
// +optional
Spec runtime.RawExtension `json:"spec,omitempty"`
}
// RunSpec defines the desired state of Run. Exactly one of Ref or Spec
// identifies the custom task (enforced by validation in this package).
type RunSpec struct {
// Ref references a custom task definition by name.
// +optional
Ref *v1beta1.TaskRef `json:"ref,omitempty"`
// Spec is a specification of a custom task
// +optional
Spec *EmbeddedRunSpec `json:"spec,omitempty"`
// Params are input parameters passed to the custom task.
// +optional
Params v1beta1.Params `json:"params,omitempty"`
// Used for cancelling a run (and maybe more later on)
// +optional
Status RunSpecStatus `json:"status,omitempty"`
// Status message for cancellation.
// +optional
StatusMessage RunSpecStatusMessage `json:"statusMessage,omitempty"`
// Used for propagating retries count to custom tasks
// +optional
Retries int `json:"retries,omitempty"`
// ServiceAccountName is defaulted from the cluster config when empty
// (see RunSpec.SetDefaults). Note: no omitempty, so it always serializes.
// +optional
ServiceAccountName string `json:"serviceAccountName"`
// PodTemplate holds pod specific configuration
// +optional
PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"`
// Time after which the custom-task times out.
// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Workspaces is a list of WorkspaceBindings from volumes to workspaces.
// +optional
Workspaces []v1beta1.WorkspaceBinding `json:"workspaces,omitempty"`
}
// RunSpecStatus defines the run spec status a user can provide to request
// a state change (currently only cancellation).
type RunSpecStatus string
const (
// RunSpecStatusCancelled indicates that the user wants to cancel the run,
// if not already cancelled or terminated
RunSpecStatusCancelled RunSpecStatus = "RunCancelled"
)
// RunSpecStatusMessage defines human readable status messages that accompany
// a RunSpecStatus (e.g. explaining why the Run was cancelled).
type RunSpecStatusMessage string
const (
// RunCancelledByPipelineMsg indicates that the PipelineRun of which part this Run was
// has been cancelled.
RunCancelledByPipelineMsg RunSpecStatusMessage = "Run cancelled as the PipelineRun it belongs to has been cancelled."
// RunCancelledByPipelineTimeoutMsg indicates that the Run was cancelled because the PipelineRun running it timed out.
RunCancelledByPipelineTimeoutMsg RunSpecStatusMessage = "Run cancelled as the PipelineRun it belongs to has timed out."
)
// GetParam returns a pointer to a copy of the first Param in the RunSpec
// with the given name, or nil if no param with that name is declared.
// TODO(jasonhall): Move this to a Params type so other code can use it?
func (rs RunSpec) GetParam(name string) *v1beta1.Param {
	for i := range rs.Params {
		if rs.Params[i].Name != name {
			continue
		}
		// Return a pointer to a copy so callers cannot mutate rs.Params.
		param := rs.Params[i]
		return &param
	}
	return nil
}
// RunReason enumerates the reasons the Run itself can set on the Succeeded
// condition of its status.
type RunReason string

const (
	// RunReasonStarted is set when the Run has just started.
	RunReasonStarted RunReason = "Started"
	// RunReasonRunning is set while the Run is executing.
	RunReasonRunning RunReason = "Running"
	// RunReasonSuccessful is set when the Run completed successfully.
	RunReasonSuccessful RunReason = "Succeeded"
	// RunReasonFailed is set when the Run completed with a failure.
	RunReasonFailed RunReason = "Failed"
	// RunReasonCancelled must be used in the Condition Reason to indicate
	// that the Run was cancelled.
	RunReasonCancelled RunReason = "RunCancelled"
	// RunReasonTimedOut must be used in the Condition Reason to indicate
	// that the Run timed out.
	RunReasonTimedOut RunReason = "RunTimedOut"
	// RunReasonWorkspaceNotSupported can be used in the Condition Reason to
	// indicate that the Run declares a workspace which this custom task
	// does not support.
	RunReasonWorkspaceNotSupported RunReason = "RunWorkspaceNotSupported"
	// RunReasonPodTemplateNotSupported can be used in the Condition Reason
	// to indicate that the Run declares a pod template which this custom
	// task does not support.
	RunReasonPodTemplateNotSupported RunReason = "RunPodTemplateNotSupported"
)

// String returns the reason as a plain string.
func (r RunReason) String() string {
	return string(r)
}
// RunStatus defines the observed state of Run.
// It is an alias of the duck-typed status in pkg/apis/run/v1alpha1.
type RunStatus = runv1alpha1.RunStatus
// runCondSet is the batch-style condition set used for the Succeeded condition.
var runCondSet = apis.NewBatchConditionSet()
// GetConditionSet retrieves the condition set for this resource. Implements
// the KRShaped interface.
func (r *Run) GetConditionSet() apis.ConditionSet { return runCondSet }
// GetStatus retrieves the status of the Run. Implements the KRShaped
// interface.
func (r *Run) GetStatus() *duckv1.Status { return &r.Status.Status }
// RunStatusFields holds the fields of Run's status. This is defined
// separately and inlined so that other types can readily consume these fields
// via duck typing. Alias of the run/v1alpha1 type.
type RunStatusFields = runv1alpha1.RunStatusFields
// RunResult used to describe the results of a task.
// Alias of the run/v1alpha1 type.
type RunResult = runv1alpha1.RunResult
// +genclient
// +genreconciler
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Run represents a single execution of a Custom Task.
//
// +k8s:openapi-gen=true
type Run struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the desired state of the Run.
// +optional
Spec RunSpec `json:"spec,omitempty"`
// Status communicates the observed state of the Run.
// +optional
Status RunStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RunList contains a list of Run
type RunList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
// Items is the list of Runs.
Items []Run `json:"items"`
}
// GetStatusCondition returns the Run's status as a ConditionAccessor.
func (r *Run) GetStatusCondition() apis.ConditionAccessor {
return &r.Status
}
// GetGroupVersionKind implements kmeta.OwnerRefable. It returns this
// package's group/version with the Run controller's kind.
func (*Run) GetGroupVersionKind() schema.GroupVersionKind {
return SchemeGroupVersion.WithKind(pipeline.RunControllerName)
}
// HasPipelineRunOwnerReference reports whether the Run carries an owner
// reference whose kind is PipelineRun, i.e. whether it was created as part
// of a PipelineRun.
func (r *Run) HasPipelineRunOwnerReference() bool {
	refs := r.GetOwnerReferences()
	for i := range refs {
		if refs[i].Kind == pipeline.PipelineRunControllerName {
			return true
		}
	}
	return false
}
// IsCancelled returns true if the Run's spec status is set to Cancelled state
// (i.e. the user requested cancellation via spec.status).
func (r *Run) IsCancelled() bool {
return r.Spec.Status == RunSpecStatusCancelled
}
// IsDone returns true if the Run's status indicates that it is done, i.e.
// the Succeeded condition is no longer Unknown (it has settled to True or
// False). A nil Run is treated as not done, matching IsSuccessful's
// nil-safety so both accessors behave consistently.
func (r *Run) IsDone() bool {
	return r != nil && !r.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
}
// HasStarted checks whether the Run has a valid (non-nil, non-zero) start
// time set in its status.
func (r *Run) HasStarted() bool {
return r.Status.StartTime != nil && !r.Status.StartTime.IsZero()
}
// IsSuccessful returns true if the Run's status indicates that it has succeeded.
// Safe to call on a nil receiver, which reports false.
func (r *Run) IsSuccessful() bool {
return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsTrue()
}
// GetRunKey return the run's key for timeout handler map
func (r *Run) GetRunKey() string {
// The Run's pointer value is a process-unique, thread-safe identifier for
// this in-memory object. NOTE(review): it is not stable across controller
// restarts or between informers that hold different copies.
return fmt.Sprintf("%s/%p", "Run", r)
}
// HasTimedOut returns true if the Run's running time is beyond the allowed
// timeout. A Run that has not started, or whose timeout is the configured
// "no timeout" sentinel, never times out. The passive clock c supplies the
// current time so tests can fake it.
func (r *Run) HasTimedOut(c clock.PassiveClock) bool {
	if r.Status.StartTime == nil || r.Status.StartTime.IsZero() {
		return false
	}
	timeout := r.GetTimeout()
	// If timeout is set to 0 or defaulted to 0, there is no timeout.
	if timeout == apisconfig.NoTimeoutDuration {
		return false
	}
	// Renamed from `runtime`, which shadowed the imported
	// k8s.io/apimachinery/pkg/runtime package identifier in this file.
	elapsed := c.Since(r.Status.StartTime.Time)
	return elapsed > timeout
}
// GetTimeout returns the timeout configured on the Run's spec, or the
// platform default when the spec leaves it unset.
func (r *Run) GetTimeout() time.Duration {
	if t := r.Spec.Timeout; t != nil {
		return t.Duration
	}
	// No explicit timeout: fall back to the platform-wide default.
	return apisconfig.DefaultTimeoutMinutes * time.Minute
}
// GetRetryCount returns the number of times this Run has already been retried,
// derived from the length of the recorded retries status.
func (r *Run) GetRetryCount() int {
return len(r.Status.RetriesStatus)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/apis/validate"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
// Compile-time assertions that Run implements the webhook validation and
// verb-limiting interfaces.
var _ apis.Validatable = (*Run)(nil)
var _ resourcesemantics.VerbLimited = (*Run)(nil)
// SupportedVerbs returns the admission operations for which validation
// should be invoked on a Run: create and update.
func (r *Run) SupportedVerbs() []admissionregistrationv1.OperationType {
	verbs := []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
	return verbs
}
// Validate implements apis.Validatable for Run: it checks the object
// metadata first and, if that passes, validates the spec.
func (r *Run) Validate(ctx context.Context) *apis.FieldError {
	if err := validate.ObjectMetadata(r.GetObjectMeta()).ViaField("metadata"); err != nil {
		return err
	}
	return r.Spec.Validate(ctx)
}
// Validate checks that a RunSpec is well-formed: exactly one of ref/spec is
// present (each with apiVersion and kind), statusMessage is only set together
// with status, and params/workspaces pass their own validation.
func (rs *RunSpec) Validate(ctx context.Context) *apis.FieldError {
	switch {
	// An entirely empty spec also covers the rs.Ref == nil && rs.Spec == nil case.
	case equality.Semantic.DeepEqual(rs, &RunSpec{}):
		return apis.ErrMissingField("spec")
	case rs.Ref != nil && rs.Spec != nil:
		return apis.ErrMultipleOneOf("spec.ref", "spec.spec")
	case rs.Ref == nil && rs.Spec == nil:
		return apis.ErrMissingOneOf("spec.ref", "spec.spec")
	}
	if ref := rs.Ref; ref != nil {
		switch {
		case ref.APIVersion == "":
			return apis.ErrMissingField("spec.ref.apiVersion")
		case ref.Kind == "":
			return apis.ErrMissingField("spec.ref.kind")
		}
	}
	if embedded := rs.Spec; embedded != nil {
		switch {
		case embedded.APIVersion == "":
			return apis.ErrMissingField("spec.spec.apiVersion")
		case embedded.Kind == "":
			return apis.ErrMissingField("spec.spec.kind")
		}
	}
	// statusMessage only makes sense alongside an explicit status.
	if rs.Status == "" && rs.StatusMessage != "" {
		return apis.ErrInvalidValue(fmt.Sprintf("statusMessage should not be set if status is not set, but it is currently set to %s", rs.StatusMessage), "statusMessage")
	}
	if err := v1beta1.ValidateParameters(ctx, rs.Params).ViaField("spec.params"); err != nil {
		return err
	}
	return v1beta1.ValidateWorkspaceBindings(ctx, rs.Workspaces).ViaField("spec.workspaces")
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"knative.dev/pkg/apis"
)
var _ apis.Convertible = (*StepAction)(nil)
// ConvertTo implements apis.Convertible, converting this v1alpha1 StepAction
// into the given sink version. Conversion is skipped during deletion.
func (s *StepAction) ConvertTo(ctx context.Context, to apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	sink, ok := to.(*v1beta1.StepAction)
	if !ok {
		return fmt.Errorf("unknown version, got: %T", to)
	}
	sink.ObjectMeta = s.ObjectMeta
	return s.Spec.ConvertTo(ctx, &sink.Spec)
}
// ConvertTo copies every field of this v1alpha1 StepActionSpec into the given
// v1beta1 StepActionSpec. The field sets of the two versions mirror each
// other, so the copy is a straight field-by-field assignment.
func (ss *StepActionSpec) ConvertTo(ctx context.Context, sink *v1beta1.StepActionSpec) error {
	sink.Description = ss.Description
	sink.Image = ss.Image
	sink.Command = ss.Command
	sink.Args = ss.Args
	sink.Env = ss.Env
	sink.Script = ss.Script
	sink.WorkingDir = ss.WorkingDir
	sink.Params = ss.Params
	sink.Results = ss.Results
	sink.SecurityContext = ss.SecurityContext
	sink.VolumeMounts = ss.VolumeMounts
	return nil
}
// ConvertFrom implements apis.Convertible, populating this v1alpha1
// StepAction from the given source version. Conversion is skipped during
// deletion.
func (s *StepAction) ConvertFrom(ctx context.Context, from apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	source, ok := from.(*v1beta1.StepAction)
	if !ok {
		return fmt.Errorf("unknown version, got: %T", from)
	}
	s.ObjectMeta = source.ObjectMeta
	return s.Spec.ConvertFrom(ctx, &source.Spec)
}
// ConvertFrom copies every field of the given v1beta1 StepActionSpec into
// this v1alpha1 StepActionSpec, mirroring ConvertTo.
func (ss *StepActionSpec) ConvertFrom(ctx context.Context, source *v1beta1.StepActionSpec) error {
	ss.Description = source.Description
	ss.Image = source.Image
	ss.Command = source.Command
	ss.Args = source.Args
	ss.Env = source.Env
	ss.Script = source.Script
	ss.WorkingDir = source.WorkingDir
	ss.Params = source.Params
	ss.Results = source.Results
	ss.SecurityContext = source.SecurityContext
	ss.VolumeMounts = source.VolumeMounts
	return nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"knative.dev/pkg/apis"
)
// Compile-time assertion that StepAction implements apis.Defaultable.
var _ apis.Defaultable = (*StepAction)(nil)
// SetDefaults implements apis.Defaultable by delegating to the spec.
func (s *StepAction) SetDefaults(ctx context.Context) {
	s.Spec.SetDefaults(ctx)
}
// SetDefaults applies defaults to every declared param and result of the
// StepAction spec.
func (ss *StepActionSpec) SetDefaults(ctx context.Context) {
	// Defaulting mutates the elements in place, so iterate by index rather
	// than over copies.
	for i := 0; i < len(ss.Params); i++ {
		ss.Params[i].SetDefaults(ctx)
	}
	for i := 0; i < len(ss.Results); i++ {
		ss.Results[i].SetDefaults(ctx)
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmeta"
)
// +genclient
// +genclient:noStatus
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StepAction represents the actionable components of Step.
// The Step can only reference it from the cluster or using remote resolution.
// StepAction has no status subresource (see +genclient:noStatus above).
//
// +k8s:openapi-gen=true
type StepAction struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata"`
	// Spec holds the desired state of the Step from the client
	// +optional
	Spec StepActionSpec `json:"spec"`
}
// Compile-time assertion that StepAction can be used as an owner reference.
var _ kmeta.OwnerRefable = (*StepAction)(nil)
// StepActionSpec returns the step action's spec.
func (s *StepAction) StepActionSpec() StepActionSpec {
	return s.Spec
}
// StepActionMetadata returns the step action's ObjectMeta.
func (s *StepAction) StepActionMetadata() metav1.ObjectMeta {
	return s.ObjectMeta
}
// Copy returns a deep copy of the StepAction, satisfying the
// StepActionObject interface.
func (s *StepAction) Copy() StepActionObject {
	return s.DeepCopy()
}
// GetGroupVersionKind implements kmeta.OwnerRefable by returning the GVK
// for StepAction in this package's scheme group/version.
func (*StepAction) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind("StepAction")
}
// StepActionList contains a list of StepActions.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type StepActionList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []StepAction `json:"items"`
}
// StepActionSpec contains the actionable components of a step.
type StepActionSpec struct {
	// Description is a user-facing description of the stepaction that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`
	// Image reference name to run for this StepAction.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// Script is the contents of an executable file to execute.
	//
	// If Script is not empty, the Step cannot have a Command and the Args will be passed to the Script.
	// +optional
	Script string `json:"script,omitempty"`
	// Step's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// Params is a list of input parameters required to run the stepAction.
	// Params must be supplied as inputs in Steps unless they declare a default value.
	// +optional
	Params v1.ParamSpecs `json:"params,omitempty"`
	// Results are values that this StepAction can output
	// +optional
	// +listType=atomic
	Results []v1.StepResult `json:"results,omitempty"`
	// SecurityContext defines the security options the Step should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// The value set in StepAction will take precedence over the value from Task.
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Volumes to mount into the Step's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	// +listType=atomic
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
}
// ToStep projects the StepActionSpec onto a v1.Step, carrying over every
// container-level field the two types share.
func (ss *StepActionSpec) ToStep() *v1.Step {
	step := &v1.Step{
		Image:           ss.Image,
		Script:          ss.Script,
		Command:         ss.Command,
		Args:            ss.Args,
		WorkingDir:      ss.WorkingDir,
		Env:             ss.Env,
		SecurityContext: ss.SecurityContext,
		VolumeMounts:    ss.VolumeMounts,
		Results:         ss.Results,
	}
	return step
}
// StepActionObject is implemented by StepAction. It abstracts the metadata,
// spec, and deep-copy operations so callers can work with any version of the
// resource that provides them.
type StepActionObject interface {
	apis.Defaultable
	StepActionMetadata() metav1.ObjectMeta
	StepActionSpec() StepActionSpec
	Copy() StepActionObject
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/substitution"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
// Compile-time assertions that StepAction implements the webhook validation
// and verb-limiting interfaces.
var (
	_ apis.Validatable              = (*StepAction)(nil)
	_ resourcesemantics.VerbLimited = (*StepAction)(nil)
)
// SupportedVerbs returns the admission operations for which validation
// should be invoked on a StepAction: create and update.
func (s *StepAction) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
}
// Validate implements apis.Validatable for StepAction: metadata errors and
// spec errors are accumulated together.
func (s *StepAction) Validate(ctx context.Context) (errs *apis.FieldError) {
	return validate.ObjectMetadata(s.GetObjectMeta()).ViaField("metadata").
		Also(s.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
}
// Validate implements apis.Validatable for StepActionSpec. It accumulates
// (rather than short-circuits) errors across all checks: image presence,
// script/command exclusivity, Windows-script gating behind the alpha API
// fields, param declaration/usage, results, and volume mounts.
func (ss *StepActionSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	if ss.Image == "" {
		errs = errs.Also(apis.ErrMissingField("Image"))
	}
	if ss.Script != "" {
		// Script and Command are mutually exclusive.
		if len(ss.Command) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: "script cannot be used with command",
				Paths: []string{"script"},
			})
		}
		// A "#!win" shebang marks a Windows script, which is gated behind
		// the alpha API fields feature flag.
		cleaned := strings.TrimSpace(ss.Script)
		if strings.HasPrefix(cleaned, "#!win") {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script"))
		}
		errs = errs.Also(validateNoParamSubstitutionsInScript(ss.Script))
	}
	errs = errs.Also(validateUsageOfDeclaredParameters(ctx, *ss))
	errs = errs.Also(v1.ValidateParameterTypes(ctx, ss.Params).ViaField("params"))
	errs = errs.Also(validateParameterVariables(ctx, *ss, ss.Params))
	errs = errs.Also(v1.ValidateStepResultsVariables(ctx, ss.Results, ss.Script))
	errs = errs.Also(v1.ValidateStepResults(ctx, ss.Results).ViaField("results"))
	errs = errs.Also(validateVolumeMounts(ss.VolumeMounts, ss.Params).ViaField("volumeMounts"))
	return errs
}
// validateNoParamSubstitutionsInScript returns an error if the script invokes
// any $(params.*) substitution, or if the extraction itself reports a problem.
func validateNoParamSubstitutionsInScript(script string) *apis.FieldError {
	_, present, errString := substitution.ExtractVariablesFromString(script, "params")
	if errString == "" && !present {
		return nil
	}
	return &apis.FieldError{
		Message: "param substitution in scripts is not allowed.",
		Paths:   []string{"script"},
	}
}
// validateUsageOfDeclaredParameters checks that every parameter referenced by
// the StepAction is declared on it, that object params are used correctly,
// and that object params declare their properties.
func validateUsageOfDeclaredParameters(ctx context.Context, sas StepActionSpec) *apis.FieldError {
	declared := sas.Params
	_, _, objectParams := declared.SortByType()
	declaredNames := sets.NewString(declared.GetNames()...)
	errs := validateStepActionVariables(ctx, sas, "params", declaredNames)
	errs = errs.Also(validateObjectUsage(ctx, sas, objectParams))
	return errs.Also(v1.ValidateObjectParamsHaveProperties(ctx, declared))
}
// validateVolumeMounts requires each volume mount name to be exactly one
// $(params.*) reference to a declared parameter. Validation stops at the
// first mount whose name is not a single param reference.
func validateVolumeMounts(volumeMounts []corev1.VolumeMount, params v1.ParamSpecs) (errs *apis.FieldError) {
	if len(volumeMounts) == 0 {
		return nil
	}
	declared := sets.String{}
	for _, p := range params {
		declared.Insert(p.Name)
	}
	for idx, vm := range volumeMounts {
		matches, _ := substitution.ExtractVariableExpressions(vm.Name, "params")
		// The name must consist of one param reference and nothing else.
		if len(matches) != 1 || matches[0] != vm.Name {
			return errs.Also(apis.ErrInvalidValue(vm.Name, "name", "expect the Name to be a single param reference").ViaIndex(idx))
		}
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(vm.Name, "params", declared).ViaIndex(idx))
	}
	return errs
}
// validateParameterVariables validates a slice of ParamSpecs against a
// StepAction: no duplicate names, well-formed names, no array params used
// where scalars are required, and defaults that only reference defined params.
func validateParameterVariables(ctx context.Context, sas StepActionSpec, params v1.ParamSpecs) *apis.FieldError {
	errs := params.ValidateNoDuplicateNames()
	stringParams, arrayParams, objectParams := params.SortByType()
	stringNames := sets.NewString(stringParams.GetNames()...)
	arrayNames := sets.NewString(arrayParams.GetNames()...)
	errs = errs.Also(v1.ValidateNameFormat(stringNames.Insert(arrayNames.List()...), objectParams))
	errs = errs.Also(validateStepActionArrayUsage(sas, "params", arrayNames))
	return errs.Also(validateDefaultParameterReferences(params))
}
// validateDefaultParameterReferences ensures that parameters referenced in
// default values are defined, and that no chain of defaults forms a cycle.
// It works like a topological sort: defaults with no references are resolved
// immediately, then parameters are iteratively resolved once everything they
// reference has been resolved; anything left over is part of a cycle.
// NOTE(review): only p.Default.StringVal is inspected, so references inside
// array/object default values appear not to be checked here — confirm intent.
func validateDefaultParameterReferences(params v1.ParamSpecs) *apis.FieldError {
	var errs *apis.FieldError
	allParams := sets.NewString(params.GetNames()...)
	// First pass: collect all parameters that have no references in their default values
	resolvedParams := sets.NewString()
	paramsNeedingResolution := make(map[string][]string)
	for _, p := range params {
		if p.Default != nil {
			matches, _ := substitution.ExtractVariableExpressions(p.Default.StringVal, "params")
			if len(matches) == 0 {
				// No references at all: trivially resolved.
				resolvedParams.Insert(p.Name)
			} else {
				// Track which parameters this parameter depends on
				referencedParams := make([]string, 0, len(matches))
				hasUndefinedParam := false
				for _, match := range matches {
					// Strip the "$(params.NAME)" wrapper to recover NAME.
					paramName := strings.TrimSuffix(strings.TrimPrefix(match, "$(params."), ")")
					if !allParams.Has(paramName) {
						hasUndefinedParam = true
						errs = errs.Also(&apis.FieldError{
							Message: fmt.Sprintf("param %q default value references param %q which is not defined", p.Name, paramName),
							Paths: []string{"params"},
						})
					}
					referencedParams = append(referencedParams, paramName)
				}
				// Only track dependencies if all referenced parameters exist
				if !hasUndefinedParam {
					paramsNeedingResolution[p.Name] = referencedParams
				}
			}
		} else {
			// No default value means nothing to resolve.
			resolvedParams.Insert(p.Name)
		}
	}
	// Second pass: iteratively resolve parameters whose dependencies are satisfied
	for len(paramsNeedingResolution) > 0 {
		paramWasResolved := false
		for paramName, referencedParams := range paramsNeedingResolution {
			canResolveParam := true
			for _, referencedParam := range referencedParams {
				if !resolvedParams.Has(referencedParam) {
					canResolveParam = false
					break
				}
			}
			if canResolveParam {
				resolvedParams.Insert(paramName)
				delete(paramsNeedingResolution, paramName)
				paramWasResolved = true
			}
		}
		if !paramWasResolved {
			// If we couldn't resolve any parameters in this iteration,
			// we have a circular dependency
			for paramName := range paramsNeedingResolution {
				errs = errs.Also(&apis.FieldError{
					Message: fmt.Sprintf("param %q default value has a circular dependency", paramName),
					Paths: []string{"params"},
				})
			}
			break
		}
	}
	return errs
}
// validateObjectUsage validates the usage of individual attributes of an
// object param (e.g. $(params.objectParam.key1)) and the usage of the entire
// object, which is prohibited in most fields.
// NOTE(review): p.Name is concatenated into a regex prefix without escaping;
// param names containing regex metacharacters would change the match —
// presumably name-format validation elsewhere prevents this, but confirm.
func validateObjectUsage(ctx context.Context, sas StepActionSpec, params v1.ParamSpecs) (errs *apis.FieldError) {
	objectParameterNames := sets.NewString()
	for _, p := range params {
		// collect all names of object type params
		objectParameterNames.Insert(p.Name)
		// collect all keys for this object param
		objectKeys := sets.NewString()
		for key := range p.Properties {
			objectKeys.Insert(key)
		}
		// check if the object's key names are referenced correctly i.e. param.objectParam.key1
		errs = errs.Also(validateStepActionVariables(ctx, sas, "params\\."+p.Name, objectKeys))
	}
	return errs.Also(validateStepActionObjectUsageAsWhole(sas, "params", objectParameterNames))
}
// validateStepActionObjectUsageAsWhole returns an error if the StepAction
// references an entire object param (rather than one of its keys) in any
// field where whole-object references are prohibited: image, script, command,
// args, env values, and volume mount names.
func validateStepActionObjectUsageAsWhole(sas StepActionSpec, prefix string, vars sets.String) *apis.FieldError {
	errs := substitution.ValidateNoReferencesToEntireProhibitedVariables(sas.Image, prefix, vars).ViaField("image")
	errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(sas.Script, prefix, vars).ViaField("script"))
	for idx, field := range sas.Command {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(field, prefix, vars).ViaFieldIndex("command", idx))
	}
	for idx, field := range sas.Args {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(field, prefix, vars).ViaFieldIndex("args", idx))
	}
	for _, envVar := range sas.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(envVar.Value, prefix, vars).ViaFieldKey("env", envVar.Name))
	}
	for idx, mount := range sas.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(mount.Name, prefix, vars).ViaFieldIndex("volumeMounts", idx))
	}
	return errs
}
// validateStepActionArrayUsage returns an error if the StepAction references
// array params in fields where array references are prohibited (image,
// script, env values, volume mount names) or not isolated (command and args
// elements must be a lone reference, not embedded in a larger string).
func validateStepActionArrayUsage(sas StepActionSpec, prefix string, arrayParamNames sets.String) *apis.FieldError {
	errs := substitution.ValidateNoReferencesToProhibitedVariables(sas.Image, prefix, arrayParamNames).ViaField("image")
	errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(sas.Script, prefix, arrayParamNames).ViaField("script"))
	for idx, field := range sas.Command {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(field, prefix, arrayParamNames).ViaFieldIndex("command", idx))
	}
	for idx, field := range sas.Args {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(field, prefix, arrayParamNames).ViaFieldIndex("args", idx))
	}
	for _, envVar := range sas.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(envVar.Value, prefix, arrayParamNames).ViaFieldKey("env", envVar.Name))
	}
	for idx, mount := range sas.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(mount.Name, prefix, arrayParamNames).ViaFieldIndex("volumeMounts", idx))
	}
	return errs
}
// validateStepActionVariables returns an error if the StepAction references
// any variable under the given prefix that is not in the provided set,
// checking image, script, command, args, env values, and volume mount names.
func validateStepActionVariables(ctx context.Context, sas StepActionSpec, prefix string, vars sets.String) *apis.FieldError {
	errs := substitution.ValidateNoReferencesToUnknownVariables(sas.Image, prefix, vars).ViaField("image")
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(sas.Script, prefix, vars).ViaField("script"))
	for idx, field := range sas.Command {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(field, prefix, vars).ViaFieldIndex("command", idx))
	}
	for idx, field := range sas.Args {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(field, prefix, vars).ViaFieldIndex("args", idx))
	}
	for _, envVar := range sas.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(envVar.Value, prefix, vars).ViaFieldKey("env", envVar.Name))
	}
	for idx, mount := range sas.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(mount.Name, prefix, vars).ViaFieldIndex("volumeMounts", idx))
	}
	return errs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"knative.dev/pkg/apis"
)
// Compile-time assertion that VerificationPolicy implements apis.Defaultable.
var _ apis.Defaultable = (*VerificationPolicy)(nil)
// SetDefaults implements apis.Defaultable. VerificationPolicy currently has
// no defaulted fields, so this is intentionally a no-op.
func (v *VerificationPolicy) SetDefaults(ctx context.Context) {
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"crypto"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// +genclient
// +genclient:noStatus
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VerificationPolicy defines the rules to verify Tekton resources.
// VerificationPolicy can config the mapping from resources to a list of public
// keys, so when verifying the resources we can use the corresponding public keys.
// +k8s:openapi-gen=true
type VerificationPolicy struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata"`
	// Spec holds the desired state of the VerificationPolicy.
	Spec VerificationPolicySpec `json:"spec"`
}
// VerificationPolicyList contains a list of VerificationPolicy.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type VerificationPolicyList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []VerificationPolicy `json:"items"`
}
// GetGroupVersionKind implements kmeta.OwnerRefable by returning the GVK
// for VerificationPolicy in this package's scheme group/version.
func (*VerificationPolicy) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind("VerificationPolicy")
}
// VerificationPolicySpec defines the patterns and authorities.
type VerificationPolicySpec struct {
	// Resources defines the patterns of resources sources that should be subject to this policy.
	// For example, we may want to apply this Policy from a certain GitHub repo.
	// Then the ResourcesPattern should be valid regex. E.g. If using gitresolver, and we want to config keys from a certain git repo.
	// `ResourcesPattern` can be `https://github.com/tektoncd/catalog.git`, we will use regex to filter out those resources.
	Resources []ResourcePattern `json:"resources"`
	// Authorities defines the rules for validating signatures.
	Authorities []Authority `json:"authorities"`
	// Mode controls whether a failing policy will fail the taskrun/pipelinerun, or only log the warnings
	// enforce - fail the taskrun/pipelinerun if verification fails (default)
	// warn - don't fail the taskrun/pipelinerun if verification fails but log warnings
	// +optional
	Mode ModeType `json:"mode,omitempty"`
}
// ResourcePattern defines the pattern of the resource source.
type ResourcePattern struct {
	// Pattern defines a resource pattern. Regex is created to filter resources based on `Pattern`
	// Example patterns:
	// GitHub resource: https://github.com/tektoncd/catalog.git, https://github.com/tektoncd/*
	// Bundle resource: gcr.io/tekton-releases/catalog/upstream/git-clone, gcr.io/tekton-releases/catalog/upstream/*
	// Hub resource: https://artifacthub.io/*,
	Pattern string `json:"pattern"`
}
// The Authority block defines the keys for validating signatures.
type Authority struct {
	// Name is the name for this authority.
	Name string `json:"name"`
	// Key contains the public key to validate the resource.
	// +optional
	Key *KeyRef `json:"key,omitempty"`
}
// ModeType indicates the type of a mode for VerificationPolicy.
type ModeType string
// Valid ModeType values; see VerificationPolicySpec.Mode for their effect.
const (
	// ModeWarn logs verification failures without failing the run.
	ModeWarn ModeType = "warn"
	// ModeEnforce fails the run when verification fails (the default).
	ModeEnforce ModeType = "enforce"
)
// KeyRef defines the reference to a public key. Exactly one of SecretRef,
// Data, or KMS should be set (enforced by KeyRef.Validate).
type KeyRef struct {
	// SecretRef sets a reference to a secret with the key.
	// +optional
	SecretRef *v1.SecretReference `json:"secretRef,omitempty"`
	// Data contains the inline public key.
	// +optional
	Data string `json:"data,omitempty"`
	// KMS contains the KMS url of the public key
	// Supported formats differ based on the KMS system used.
	// One example of a KMS url could be:
	// gcpkms://projects/[PROJECT]/locations/[LOCATION]>/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[KEY_VERSION]
	// For more examples please refer https://docs.sigstore.dev/cosign/kms_support.
	// Note that the KMS is not supported yet.
	// +optional
	KMS string `json:"kms,omitempty"`
	// HashAlgorithm always defaults to sha256 if the algorithm hasn't been explicitly set
	// +optional
	HashAlgorithm HashAlgorithm `json:"hashAlgorithm,omitempty"`
}
// HashAlgorithm defines the hash algorithm used for the public key.
type HashAlgorithm string
// Unexported names for the supported algorithms; these are the keys of
// SupportedSignatureAlgorithms below.
const (
	sha224 HashAlgorithm = "sha224"
	sha256 HashAlgorithm = "sha256"
	sha384 HashAlgorithm = "sha384"
	sha512 HashAlgorithm = "sha512"
	// empty is treated as sha256 (the default).
	empty HashAlgorithm = ""
)
// SupportedSignatureAlgorithms maps each supported HashAlgorithm name to its
// crypto.Hash, similar to the list supported by cosign.
// An empty HashAlgorithm is allowed and maps to SHA256.
var SupportedSignatureAlgorithms = map[HashAlgorithm]crypto.Hash{
	sha224: crypto.SHA224,
	sha256: crypto.SHA256,
	sha384: crypto.SHA384,
	sha512: crypto.SHA512,
	empty: crypto.SHA256,
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"knative.dev/pkg/apis"
)
// Compile-time assertion that VerificationPolicy implements apis.Validatable.
var _ apis.Validatable = (*VerificationPolicy)(nil)
var (
	// InvalidResourcePatternErr is returned when the pattern is not valid regex expression.
	// NOTE(review): despite the Err-prefixed name this is a plain string used
	// as a message prefix, not an error value — renaming would break callers.
	InvalidResourcePatternErr = "resourcePattern cannot be compiled by regex"
)
// Validate implements apis.Validatable for VerificationPolicy, accumulating
// metadata errors and spec errors.
func (v *VerificationPolicy) Validate(ctx context.Context) (errs *apis.FieldError) {
	errs = validate.ObjectMetadata(v.GetObjectMeta()).ViaField("metadata")
	return errs.Also(v.Spec.Validate(ctx))
}
// Validate checks a VerificationPolicySpec: Resources must be non-empty and
// each pattern must compile as a regex; Authorities must be non-empty with
// each key valid; Mode, if set, must be one of the known values.
func (vs *VerificationPolicySpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	if len(vs.Resources) == 0 {
		errs = errs.Also(apis.ErrMissingField("resources"))
	}
	for _, pattern := range vs.Resources {
		errs = errs.Also(pattern.Validate(ctx))
	}
	if len(vs.Authorities) == 0 {
		errs = errs.Also(apis.ErrMissingField("authorities"))
	}
	for idx, authority := range vs.Authorities {
		if authority.Key == nil {
			continue
		}
		errs = errs.Also(authority.Key.Validate(ctx).ViaFieldIndex("key", idx))
	}
	switch vs.Mode {
	case "", ModeEnforce, ModeWarn:
		// valid (empty falls back to the default behavior)
	default:
		errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("available values are: %s, %s, but got: %s", ModeEnforce, ModeWarn, vs.Mode), "mode"))
	}
	return errs
}
// Validate checks that exactly one of a KeyRef's Data, SecretRef, or KMS is
// set, and that its HashAlgorithm (if any) is supported.
func (key *KeyRef) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Count how many of the mutually-exclusive key sources are populated.
	sources := 0
	if key.Data != "" {
		sources++
	}
	if key.SecretRef != nil {
		sources++
	}
	if key.KMS != "" {
		sources++
	}
	if sources == 0 {
		errs = errs.Also(apis.ErrMissingOneOf("data", "kms", "secretref"))
	} else if sources > 1 {
		errs = errs.Also(apis.ErrMultipleOneOf("data", "kms", "secretref"))
	}
	return errs.Also(validateHashAlgorithm(key.HashAlgorithm))
}
// Validate checks that the ResourcePattern's Pattern compiles as a valid
// regular expression.
func (r *ResourcePattern) Validate(ctx context.Context) (errs *apis.FieldError) {
	_, err := regexp.Compile(r.Pattern)
	if err == nil {
		return nil
	}
	return apis.ErrInvalidValue(r.Pattern, "ResourcePattern", fmt.Sprintf("%v: %v", InvalidResourcePatternErr, err))
}
// validateHashAlgorithm reports an invalid-value error unless the
// (case-insensitively matched) algorithm name appears in
// SupportedSignatureAlgorithms.
func validateHashAlgorithm(algorithmName HashAlgorithm) (errs *apis.FieldError) {
	lowercased := HashAlgorithm(strings.ToLower(string(algorithmName)))
	if _, ok := SupportedSignatureAlgorithms[lowercased]; ok {
		return nil
	}
	return apis.ErrInvalidValue(algorithmName, "HashAlgorithm")
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): everything below is produced by deepcopy-gen (see the
// "Code generated by deepcopy-gen. DO NOT EDIT." header above). Change the
// API types and regenerate rather than editing these functions by hand.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Authority) DeepCopyInto(out *Authority) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(KeyRef)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authority.
func (in *Authority) DeepCopy() *Authority {
	if in == nil {
		return nil
	}
	out := new(Authority)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmbeddedRunSpec) DeepCopyInto(out *EmbeddedRunSpec) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Metadata.DeepCopyInto(&out.Metadata)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedRunSpec.
func (in *EmbeddedRunSpec) DeepCopy() *EmbeddedRunSpec {
	if in == nil {
		return nil
	}
	out := new(EmbeddedRunSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KeyRef) DeepCopyInto(out *KeyRef) {
	*out = *in
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(v1.SecretReference)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyRef.
func (in *KeyRef) DeepCopy() *KeyRef {
	if in == nil {
		return nil
	}
	out := new(KeyRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePattern) DeepCopyInto(out *ResourcePattern) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePattern.
func (in *ResourcePattern) DeepCopy() *ResourcePattern {
	if in == nil {
		return nil
	}
	out := new(ResourcePattern)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Run) DeepCopyInto(out *Run) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Run.
func (in *Run) DeepCopy() *Run {
	if in == nil {
		return nil
	}
	out := new(Run)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Run) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunList) DeepCopyInto(out *RunList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Run, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunList.
func (in *RunList) DeepCopy() *RunList {
	if in == nil {
		return nil
	}
	out := new(RunList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RunList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunSpec) DeepCopyInto(out *RunSpec) {
	*out = *in
	if in.Ref != nil {
		in, out := &in.Ref, &out.Ref
		*out = new(v1beta1.TaskRef)
		(*in).DeepCopyInto(*out)
	}
	if in.Spec != nil {
		in, out := &in.Spec, &out.Spec
		*out = new(EmbeddedRunSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(v1beta1.Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PodTemplate != nil {
		in, out := &in.PodTemplate, &out.PodTemplate
		*out = new(pod.Template)
		(*in).DeepCopyInto(*out)
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]v1beta1.WorkspaceBinding, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunSpec.
func (in *RunSpec) DeepCopy() *RunSpec {
	if in == nil {
		return nil
	}
	out := new(RunSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepAction) DeepCopyInto(out *StepAction) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAction.
func (in *StepAction) DeepCopy() *StepAction {
	if in == nil {
		return nil
	}
	out := new(StepAction)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StepAction) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepActionList) DeepCopyInto(out *StepActionList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]StepAction, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepActionList.
func (in *StepActionList) DeepCopy() *StepActionList {
	if in == nil {
		return nil
	}
	out := new(StepActionList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StepActionList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepActionSpec) DeepCopyInto(out *StepActionSpec) {
	*out = *in
	if in.Command != nil {
		in, out := &in.Command, &out.Command
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]v1.EnvVar, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(pipelinev1.ParamSpecs, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Results != nil {
		in, out := &in.Results, &out.Results
		*out = make([]pipelinev1.StepResult, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(v1.SecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]v1.VolumeMount, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepActionSpec.
func (in *StepActionSpec) DeepCopy() *StepActionSpec {
	if in == nil {
		return nil
	}
	out := new(StepActionSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VerificationPolicy) DeepCopyInto(out *VerificationPolicy) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationPolicy.
func (in *VerificationPolicy) DeepCopy() *VerificationPolicy {
	if in == nil {
		return nil
	}
	out := new(VerificationPolicy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VerificationPolicy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VerificationPolicyList) DeepCopyInto(out *VerificationPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]VerificationPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationPolicyList.
func (in *VerificationPolicyList) DeepCopy() *VerificationPolicyList {
	if in == nil {
		return nil
	}
	out := new(VerificationPolicyList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VerificationPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VerificationPolicySpec) DeepCopyInto(out *VerificationPolicySpec) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]ResourcePattern, len(*in))
		copy(*out, *in)
	}
	if in.Authorities != nil {
		in, out := &in.Authorities, &out.Authorities
		*out = make([]Authority, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationPolicySpec.
func (in *VerificationPolicySpec) DeepCopy() *VerificationPolicySpec {
	if in == nil {
		return nil
	}
	out := new(VerificationPolicySpec)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo populates the v1 Ref sink from this v1beta1 Ref.
func (r Ref) convertTo(ctx context.Context, sink *v1.Ref) {
	sink.Name = r.Name
	// Avoid shadowing the builtin "new" for the converted resolver ref.
	converted := v1.ResolverRef{}
	r.ResolverRef.convertTo(ctx, &converted)
	sink.ResolverRef = converted
}
// convertFrom populates this v1beta1 Ref from the given v1 Ref.
func (r *Ref) convertFrom(ctx context.Context, source v1.Ref) {
	r.Name = source.Name
	converted := ResolverRef{}
	converted.convertFrom(ctx, source.ResolverRef)
	r.ResolverRef = converted
}
// convertTo copies this v1beta1 Step into the v1 Step sink, converting the
// nested v1beta1 types (workspace usages, ref, params, when expressions) to
// their v1 counterparts. Slice fields built here are reset on the sink first
// so that a reused sink never retains stale entries.
func (s Step) convertTo(ctx context.Context, sink *v1.Step) {
	sink.Name = s.Name
	sink.DisplayName = s.DisplayName
	sink.Image = s.Image
	sink.Command = s.Command
	sink.Args = s.Args
	sink.WorkingDir = s.WorkingDir
	sink.EnvFrom = s.EnvFrom
	sink.Env = s.Env
	// v1beta1 "Resources" became "ComputeResources" in v1.
	sink.ComputeResources = s.Resources
	sink.VolumeMounts = s.VolumeMounts
	sink.VolumeDevices = s.VolumeDevices
	sink.ImagePullPolicy = s.ImagePullPolicy
	sink.SecurityContext = s.SecurityContext
	sink.Script = s.Script
	sink.Timeout = s.Timeout
	sink.Workspaces = nil
	for _, w := range s.Workspaces {
		converted := v1.WorkspaceUsage{}
		w.convertTo(ctx, &converted)
		sink.Workspaces = append(sink.Workspaces, converted)
	}
	sink.OnError = (v1.OnErrorType)(s.OnError)
	sink.StdoutConfig = (*v1.StepOutputConfig)(s.StdoutConfig)
	sink.StderrConfig = (*v1.StepOutputConfig)(s.StderrConfig)
	if s.Ref != nil {
		sink.Ref = &v1.Ref{}
		s.Ref.convertTo(ctx, sink.Ref)
	}
	sink.Params = nil
	for _, p := range s.Params {
		converted := v1.Param{}
		p.convertTo(ctx, &converted)
		sink.Params = append(sink.Params, converted)
	}
	sink.Results = s.Results
	// Reset When before appending, matching the Workspaces/Params handling
	// above; previously stale entries on a reused sink were kept.
	sink.When = nil
	for _, w := range s.When {
		converted := v1.WhenExpression{}
		w.convertTo(ctx, &converted)
		sink.When = append(sink.When, converted)
	}
}
// convertFrom populates this v1beta1 Step from the given v1 Step, converting
// nested v1 types (workspace usages, ref, params, when expressions) back to
// their v1beta1 counterparts. Slice fields built here are reset first so a
// reused receiver never retains stale entries.
func (s *Step) convertFrom(ctx context.Context, source v1.Step) {
	s.Name = source.Name
	s.DisplayName = source.DisplayName
	s.Image = source.Image
	s.Command = source.Command
	s.Args = source.Args
	s.WorkingDir = source.WorkingDir
	s.EnvFrom = source.EnvFrom
	s.Env = source.Env
	// v1 "ComputeResources" maps back to v1beta1 "Resources".
	s.Resources = source.ComputeResources
	s.VolumeMounts = source.VolumeMounts
	s.VolumeDevices = source.VolumeDevices
	s.ImagePullPolicy = source.ImagePullPolicy
	s.SecurityContext = source.SecurityContext
	s.Script = source.Script
	s.Timeout = source.Timeout
	s.Workspaces = nil
	for _, w := range source.Workspaces {
		converted := WorkspaceUsage{}
		converted.convertFrom(ctx, w)
		s.Workspaces = append(s.Workspaces, converted)
	}
	s.OnError = (OnErrorType)(source.OnError)
	s.StdoutConfig = (*StepOutputConfig)(source.StdoutConfig)
	s.StderrConfig = (*StepOutputConfig)(source.StderrConfig)
	if source.Ref != nil {
		newRef := Ref{}
		newRef.convertFrom(ctx, *source.Ref)
		s.Ref = &newRef
	}
	s.Params = nil
	for _, p := range source.Params {
		converted := Param{}
		converted.ConvertFrom(ctx, p)
		s.Params = append(s.Params, converted)
	}
	s.Results = source.Results
	// Reset When before appending, matching the Workspaces/Params handling
	// above; previously stale entries on a reused receiver were kept.
	s.When = nil
	for _, w := range source.When {
		converted := WhenExpression{}
		converted.convertFrom(ctx, w)
		s.When = append(s.When, converted)
	}
}
// convertTo copies this v1beta1 StepTemplate's container-like fields into the
// v1 StepTemplate sink; v1beta1 "Resources" maps to v1 "ComputeResources".
func (s StepTemplate) convertTo(ctx context.Context, sink *v1.StepTemplate) {
	sink.Image = s.Image
	sink.Command = s.Command
	sink.Args = s.Args
	sink.WorkingDir = s.WorkingDir
	sink.EnvFrom = s.EnvFrom
	sink.Env = s.Env
	sink.ComputeResources = s.Resources
	sink.VolumeMounts = s.VolumeMounts
	sink.VolumeDevices = s.VolumeDevices
	sink.ImagePullPolicy = s.ImagePullPolicy
	sink.SecurityContext = s.SecurityContext
	// TODO(#4546): Handle deprecated fields
	// Name, Ports, LivenessProbe, ReadinessProbe, StartupProbe, Lifecycle, TerminationMessagePath
	// TerminationMessagePolicy, Stdin, StdinOnce, TTY
}
// convertFrom populates this v1beta1 StepTemplate from the given v1
// StepTemplate; v1 "ComputeResources" maps back to v1beta1 "Resources".
func (s *StepTemplate) convertFrom(ctx context.Context, source *v1.StepTemplate) {
	s.Image = source.Image
	s.Command = source.Command
	s.Args = source.Args
	s.WorkingDir = source.WorkingDir
	s.EnvFrom = source.EnvFrom
	s.Env = source.Env
	s.Resources = source.ComputeResources
	s.VolumeMounts = source.VolumeMounts
	s.VolumeDevices = source.VolumeDevices
	s.ImagePullPolicy = source.ImagePullPolicy
	s.SecurityContext = source.SecurityContext
}
// convertTo copies this v1beta1 Sidecar into the v1 Sidecar sink; workspace
// usages are converted individually and Resources becomes ComputeResources.
func (s Sidecar) convertTo(ctx context.Context, sink *v1.Sidecar) {
	sink.Name = s.Name
	sink.Image = s.Image
	sink.Command = s.Command
	sink.Args = s.Args
	sink.WorkingDir = s.WorkingDir
	sink.Ports = s.Ports
	sink.EnvFrom = s.EnvFrom
	sink.Env = s.Env
	sink.ComputeResources = s.Resources
	sink.VolumeMounts = s.VolumeMounts
	sink.VolumeDevices = s.VolumeDevices
	sink.LivenessProbe = s.LivenessProbe
	sink.ReadinessProbe = s.ReadinessProbe
	sink.StartupProbe = s.StartupProbe
	sink.Lifecycle = s.Lifecycle
	sink.TerminationMessagePath = s.TerminationMessagePath
	sink.TerminationMessagePolicy = s.TerminationMessagePolicy
	sink.ImagePullPolicy = s.ImagePullPolicy
	sink.SecurityContext = s.SecurityContext
	sink.Stdin = s.Stdin
	sink.StdinOnce = s.StdinOnce
	sink.TTY = s.TTY
	sink.Script = s.Script
	// Clear before appending so a reused sink holds only this Sidecar's usages.
	sink.Workspaces = nil
	for _, w := range s.Workspaces {
		new := v1.WorkspaceUsage{}
		w.convertTo(ctx, &new)
		sink.Workspaces = append(sink.Workspaces, new)
	}
}
// convertFrom populates this v1beta1 Sidecar from the given v1 Sidecar;
// workspace usages are converted individually and ComputeResources maps
// back to Resources.
func (s *Sidecar) convertFrom(ctx context.Context, source v1.Sidecar) {
	s.Name = source.Name
	s.Image = source.Image
	s.Command = source.Command
	s.Args = source.Args
	s.WorkingDir = source.WorkingDir
	s.Ports = source.Ports
	s.EnvFrom = source.EnvFrom
	s.Env = source.Env
	s.Resources = source.ComputeResources
	s.VolumeMounts = source.VolumeMounts
	s.VolumeDevices = source.VolumeDevices
	s.LivenessProbe = source.LivenessProbe
	s.ReadinessProbe = source.ReadinessProbe
	s.StartupProbe = source.StartupProbe
	s.Lifecycle = source.Lifecycle
	s.TerminationMessagePath = source.TerminationMessagePath
	s.TerminationMessagePolicy = source.TerminationMessagePolicy
	s.ImagePullPolicy = source.ImagePullPolicy
	s.SecurityContext = source.SecurityContext
	s.Stdin = source.Stdin
	s.StdinOnce = source.StdinOnce
	s.TTY = source.TTY
	s.Script = source.Script
	// Clear before appending so a reused receiver holds only the source's usages.
	s.Workspaces = nil
	for _, w := range source.Workspaces {
		new := WorkspaceUsage{}
		new.convertFrom(ctx, w)
		s.Workspaces = append(s.Workspaces, new)
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Step runs a subcomponent of a Task
type Step struct {
	// Name of the Step specified as a DNS_LABEL.
	// Each Step in a Task must have a unique name.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// DisplayName is a user-facing name of the step that may be
	// used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`
	// Image reference name to run for this Step.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Step's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the Step's container. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=containerPort
	// +listMapKey=protocol
	DeprecatedPorts []corev1.ContainerPort `json:"ports,omitempty" patchMergeKey:"containerPort" patchStrategy:"merge" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	// +listType=atomic
	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this Step.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Volumes to mount into the Step's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	// +listType=atomic
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the Step.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	// +listType=atomic
	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchMergeKey:"devicePath" patchStrategy:"merge" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Periodic probe of container liveness.
	// Step will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedLivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of container service readiness.
	// Step will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// DeprecatedStartupProbe indicates that the Pod this Step runs in has successfully initialized.
	// If specified, no other probes are executed until this completes successfully.
	// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
	// when it might take a long time to load data or warm a cache, than during steady-state operation.
	// This cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedStartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
	// Actions that the management system should take in response to container lifecycle events.
	// Cannot be updated.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedLifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Deprecated: This field will be removed in a future release and can't be meaningfully used.
	// +optional
	DeprecatedTerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Deprecated: This field will be removed in a future release and can't be meaningfully used.
	// +optional
	DeprecatedTerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// SecurityContext defines the security options the Step should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these are deprecated and should not be used.
	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedStdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container processes that reads from stdin will never receive an EOF.
	// Default is false
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedStdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedTTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
	// Script is the contents of an executable file to execute.
	//
	// If Script is not empty, the Step cannot have a Command and the Args will be passed to the Script.
	// +optional
	Script string `json:"script,omitempty"`
	// Timeout is the time after which the step times out. Defaults to never.
	// Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
	// This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha"
	// for this field to be supported.
	//
	// Workspaces is a list of workspaces from the Task that this Step wants
	// exclusive access to. Adding a workspace to this list means that any
	// other Step or Sidecar that does not also request this Workspace will
	// not have access to it.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceUsage `json:"workspaces,omitempty"`
	// OnError defines the exiting behavior of a container on error
	// can be set to [ continue | stopAndFail ]
	OnError OnErrorType `json:"onError,omitempty"`
	// Stores configuration for the stdout stream of the step.
	// +optional
	StdoutConfig *StepOutputConfig `json:"stdoutConfig,omitempty"`
	// Stores configuration for the stderr stream of the step.
	// +optional
	StderrConfig *StepOutputConfig `json:"stderrConfig,omitempty"`
	// Contains the reference to an existing StepAction.
	//+optional
	Ref *Ref `json:"ref,omitempty"`
	// Params declares parameters passed to this step action.
	// +optional
	Params Params `json:"params,omitempty"`
	// Results declares StepResults produced by the Step.
	//
	// It can be used in an inlined Step when used to store Results to $(step.results.resultName.path).
	// It cannot be used when referencing StepActions using [v1beta1.Step.Ref].
	// The Results declared by the StepActions will be stored here instead.
	// +optional
	// +listType=atomic
	Results []v1.StepResult `json:"results,omitempty"`
	// When is the list of StepWhenExpressions attached to this Step.
	When StepWhenExpressions `json:"when,omitempty"`
}
// Ref can be used to refer to a specific instance of a StepAction.
type Ref struct {
	// Name of the referenced StepAction.
	Name string `json:"name,omitempty"`
	// ResolverRef allows referencing a StepAction in a remote location
	// like a git repo.
	// +optional
	ResolverRef `json:",omitempty"`
}
// OnErrorType defines the supported exiting behaviors of a container on error
type OnErrorType string

const (
	// StopAndFail indicates exit the taskRun if the container exits with non-zero exit code
	StopAndFail OnErrorType = "stopAndFail"
	// Continue indicates continue executing the rest of the steps irrespective of the container exit code
	Continue OnErrorType = "continue"
)
// StepOutputConfig stores configuration for a step output stream.
// It is used for both the stdout (Step.StdoutConfig) and stderr
// (Step.StderrConfig) streams.
type StepOutputConfig struct {
	// Path to duplicate the output stream to on the container's local filesystem.
	// +optional
	Path string `json:"path,omitempty"`
}
// ToK8sContainer converts the Step to a Kubernetes Container struct.
//
// The Deprecated*-prefixed Step fields (ports, probes, lifecycle,
// termination-message settings, stdin/tty) are written back onto the
// corresponding first-class Container fields.
func (s *Step) ToK8sContainer() *corev1.Container {
	var c corev1.Container
	c.Name = s.Name
	c.Image = s.Image
	c.Command = s.Command
	c.Args = s.Args
	c.WorkingDir = s.WorkingDir
	c.Ports = s.DeprecatedPorts
	c.EnvFrom = s.EnvFrom
	c.Env = s.Env
	c.Resources = s.Resources
	c.VolumeMounts = s.VolumeMounts
	c.VolumeDevices = s.VolumeDevices
	c.LivenessProbe = s.DeprecatedLivenessProbe
	c.ReadinessProbe = s.DeprecatedReadinessProbe
	c.StartupProbe = s.DeprecatedStartupProbe
	c.Lifecycle = s.DeprecatedLifecycle
	c.TerminationMessagePath = s.DeprecatedTerminationMessagePath
	c.TerminationMessagePolicy = s.DeprecatedTerminationMessagePolicy
	c.ImagePullPolicy = s.ImagePullPolicy
	c.SecurityContext = s.SecurityContext
	c.Stdin = s.DeprecatedStdin
	c.StdinOnce = s.DeprecatedStdinOnce
	c.TTY = s.DeprecatedTTY
	return &c
}
// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container
//
// This is the inverse of ToK8sContainer: Container fields that only exist on
// Step in deprecated form (ports, probes, lifecycle, termination-message
// settings, stdin/tty) are stored into their Deprecated*-prefixed
// counterparts so a Container can round-trip through a Step losslessly.
func (s *Step) SetContainerFields(c corev1.Container) {
	s.Name = c.Name
	s.Image = c.Image
	s.Command = c.Command
	s.Args = c.Args
	s.WorkingDir = c.WorkingDir
	s.DeprecatedPorts = c.Ports
	s.EnvFrom = c.EnvFrom
	s.Env = c.Env
	s.Resources = c.Resources
	s.VolumeMounts = c.VolumeMounts
	s.VolumeDevices = c.VolumeDevices
	s.DeprecatedLivenessProbe = c.LivenessProbe
	s.DeprecatedReadinessProbe = c.ReadinessProbe
	s.DeprecatedStartupProbe = c.StartupProbe
	s.DeprecatedLifecycle = c.Lifecycle
	s.DeprecatedTerminationMessagePath = c.TerminationMessagePath
	s.DeprecatedTerminationMessagePolicy = c.TerminationMessagePolicy
	s.ImagePullPolicy = c.ImagePullPolicy
	s.SecurityContext = c.SecurityContext
	s.DeprecatedStdin = c.Stdin
	s.DeprecatedStdinOnce = c.StdinOnce
	s.DeprecatedTTY = c.TTY
}
// StepTemplate is a template for a Step
//
// NOTE(review): the fields mirror corev1.Container; presumably values set
// here act as defaults applied to each Step of the Task — confirm against
// the Task reconciler before relying on that.
type StepTemplate struct {
	// Default name for each Step specified as a DNS_LABEL.
	// Each Step in a Task must have a unique name.
	// Cannot be updated.
	//
	// Deprecated: This field will be removed in a future release.
	//
	DeprecatedName string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Default image name to use for each Step.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// This field is optional to allow higher level config management to default or override
	// container images in workload controllers like Deployments and StatefulSets.
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The docker image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Step's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the Step's container. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=containerPort
	// +listMapKey=protocol
	DeprecatedPorts []corev1.ContainerPort `json:"ports,omitempty" patchMergeKey:"containerPort" patchStrategy:"merge" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the Step.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	// +listType=atomic
	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this Step.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Volumes to mount into the Step's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	// +listType=atomic
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the Step.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	// +listType=atomic
	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchMergeKey:"devicePath" patchStrategy:"merge" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Periodic probe of container liveness.
	// Container will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedLivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of container service readiness.
	// Container will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// DeprecatedStartupProbe indicates that the Pod has successfully initialized.
	// If specified, no other probes are executed until this completes successfully.
	// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
	// when it might take a long time to load data or warm a cache, than during steady-state operation.
	// This cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedStartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
	// Actions that the management system should take in response to container lifecycle events.
	// Cannot be updated.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedLifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Deprecated: This field will be removed in a future release and cannot be meaningfully used.
	// +optional
	DeprecatedTerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Deprecated: This field will be removed in a future release and cannot be meaningfully used.
	// +optional
	DeprecatedTerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// SecurityContext defines the security options the Step should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these are deprecated and should not be used.
	// Whether this Step should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the Step will always result in EOF.
	// Default is false.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedStdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container processes that reads from stdin will never receive an EOF.
	// Default is false
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedStdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this Step should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true.
	// Default is false.
	//
	// Deprecated: This field will be removed in a future release.
	//
	// +optional
	DeprecatedTTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container
//
// This is the inverse of ToK8sContainer: the Container's Name and the fields
// that only exist on StepTemplate in deprecated form are stored into their
// Deprecated*-prefixed counterparts so a Container can round-trip through a
// StepTemplate losslessly.
func (s *StepTemplate) SetContainerFields(c corev1.Container) {
	s.DeprecatedName = c.Name
	s.Image = c.Image
	s.Command = c.Command
	s.Args = c.Args
	s.WorkingDir = c.WorkingDir
	s.DeprecatedPorts = c.Ports
	s.EnvFrom = c.EnvFrom
	s.Env = c.Env
	s.Resources = c.Resources
	s.VolumeMounts = c.VolumeMounts
	s.VolumeDevices = c.VolumeDevices
	s.DeprecatedLivenessProbe = c.LivenessProbe
	s.DeprecatedReadinessProbe = c.ReadinessProbe
	s.DeprecatedStartupProbe = c.StartupProbe
	s.DeprecatedLifecycle = c.Lifecycle
	s.DeprecatedTerminationMessagePath = c.TerminationMessagePath
	s.DeprecatedTerminationMessagePolicy = c.TerminationMessagePolicy
	s.ImagePullPolicy = c.ImagePullPolicy
	s.SecurityContext = c.SecurityContext
	s.DeprecatedStdin = c.Stdin
	s.DeprecatedStdinOnce = c.StdinOnce
	s.DeprecatedTTY = c.TTY
}
// ToK8sContainer converts the StepTemplate to a Kubernetes Container struct.
//
// The Deprecated*-prefixed StepTemplate fields (name, ports, probes,
// lifecycle, termination-message settings, stdin/tty) are written back onto
// the corresponding first-class Container fields.
func (s *StepTemplate) ToK8sContainer() *corev1.Container {
	var c corev1.Container
	c.Name = s.DeprecatedName
	c.Image = s.Image
	c.Command = s.Command
	c.Args = s.Args
	c.WorkingDir = s.WorkingDir
	c.Ports = s.DeprecatedPorts
	c.EnvFrom = s.EnvFrom
	c.Env = s.Env
	c.Resources = s.Resources
	c.VolumeMounts = s.VolumeMounts
	c.VolumeDevices = s.VolumeDevices
	c.LivenessProbe = s.DeprecatedLivenessProbe
	c.ReadinessProbe = s.DeprecatedReadinessProbe
	c.StartupProbe = s.DeprecatedStartupProbe
	c.Lifecycle = s.DeprecatedLifecycle
	c.TerminationMessagePath = s.DeprecatedTerminationMessagePath
	c.TerminationMessagePolicy = s.DeprecatedTerminationMessagePolicy
	c.ImagePullPolicy = s.ImagePullPolicy
	c.SecurityContext = s.SecurityContext
	c.Stdin = s.DeprecatedStdin
	c.StdinOnce = s.DeprecatedStdinOnce
	c.TTY = s.DeprecatedTTY
	return &c
}
// Sidecar has nearly the same data structure as Step but does not have the ability to timeout.
type Sidecar struct {
	// Name of the Sidecar specified as a DNS_LABEL.
	// Each Sidecar in a Task must have a unique name (DNS_LABEL).
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Image name to be used by the Sidecar.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Sidecar's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the Sidecar. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=containerPort
	// +listMapKey=protocol
	Ports []corev1.ContainerPort `json:"ports,omitempty" patchMergeKey:"containerPort" patchStrategy:"merge" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the Sidecar.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the Sidecar is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	// +listType=atomic
	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the Sidecar.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this Sidecar.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Volumes to mount into the Sidecar's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	// +listType=atomic
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the Sidecar.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	// +listType=atomic
	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchMergeKey:"devicePath" patchStrategy:"merge" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Periodic probe of Sidecar liveness.
	// Container will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of Sidecar service readiness.
	// Container will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// StartupProbe indicates that the Pod the Sidecar is running in has successfully initialized.
	// If specified, no other probes are executed until this completes successfully.
	// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
	// when it might take a long time to load data or warm a cache, than during steady-state operation.
	// This cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
	// Actions that the management system should take in response to Sidecar lifecycle events.
	// Cannot be updated.
	// +optional
	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the Sidecar's termination message
	// will be written is mounted into the Sidecar's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the Sidecar status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of Sidecar log output if the termination
	// message file is empty and the Sidecar exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// SecurityContext defines the security options the Sidecar should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.
	// Whether this Sidecar should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the Sidecar will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on Sidecar start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the Sidecar is restarted. If this
	// flag is false, a container processes that reads from stdin will never receive an EOF.
	// Default is false
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this Sidecar should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
	// Script is the contents of an executable file to execute.
	//
	// If Script is not empty, the Sidecar cannot have a Command or Args.
	// +optional
	Script string `json:"script,omitempty"`
	// This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha"
	// for this field to be supported.
	//
	// Workspaces is a list of workspaces from the Task that this Sidecar wants
	// exclusive access to. Adding a workspace to this list means that any
	// other Step or Sidecar that does not also request this Workspace will
	// not have access to it.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceUsage `json:"workspaces,omitempty"`
	// RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an
	// initContainer and must have it's policy set to "Always". It is currently
	// left optional to help support Kubernetes versions prior to 1.29 when this feature
	// was introduced.
	// +optional
	RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
}
// ToK8sContainer converts the Sidecar to a Kubernetes Container struct.
//
// Every Sidecar field maps one-to-one onto the corresponding Container
// field. RestartPolicy is copied unconditionally: when s.RestartPolicy is
// nil the Container's RestartPolicy is simply left nil, which is exactly
// what the previous nil-branch (a second, duplicated struct literal that
// omitted the field) produced. Collapsing the two literals removes the
// duplication without changing behavior.
func (s *Sidecar) ToK8sContainer() *corev1.Container {
	return &corev1.Container{
		Name:                     s.Name,
		Image:                    s.Image,
		Command:                  s.Command,
		Args:                     s.Args,
		WorkingDir:               s.WorkingDir,
		Ports:                    s.Ports,
		EnvFrom:                  s.EnvFrom,
		Env:                      s.Env,
		Resources:                s.Resources,
		VolumeMounts:             s.VolumeMounts,
		VolumeDevices:            s.VolumeDevices,
		LivenessProbe:            s.LivenessProbe,
		ReadinessProbe:           s.ReadinessProbe,
		RestartPolicy:            s.RestartPolicy,
		StartupProbe:             s.StartupProbe,
		Lifecycle:                s.Lifecycle,
		TerminationMessagePath:   s.TerminationMessagePath,
		TerminationMessagePolicy: s.TerminationMessagePolicy,
		ImagePullPolicy:          s.ImagePullPolicy,
		SecurityContext:          s.SecurityContext,
		Stdin:                    s.Stdin,
		StdinOnce:                s.StdinOnce,
		TTY:                      s.TTY,
	}
}
// SetContainerFields sets the fields of the Sidecar to the values of the corresponding fields in the Container
//
// This is the inverse of ToK8sContainer; unlike Step, Sidecar has no
// deprecated field names, so the mapping is a direct one-to-one copy
// (including RestartPolicy).
func (s *Sidecar) SetContainerFields(c corev1.Container) {
	s.Name = c.Name
	s.Image = c.Image
	s.Command = c.Command
	s.Args = c.Args
	s.WorkingDir = c.WorkingDir
	s.Ports = c.Ports
	s.EnvFrom = c.EnvFrom
	s.Env = c.Env
	s.Resources = c.Resources
	s.VolumeMounts = c.VolumeMounts
	s.VolumeDevices = c.VolumeDevices
	s.LivenessProbe = c.LivenessProbe
	s.ReadinessProbe = c.ReadinessProbe
	s.StartupProbe = c.StartupProbe
	s.Lifecycle = c.Lifecycle
	s.TerminationMessagePath = c.TerminationMessagePath
	s.TerminationMessagePolicy = c.TerminationMessagePolicy
	s.ImagePullPolicy = c.ImagePullPolicy
	s.SecurityContext = c.SecurityContext
	s.Stdin = c.Stdin
	s.StdinOnce = c.StdinOnce
	s.TTY = c.TTY
	s.RestartPolicy = c.RestartPolicy
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"errors"
"fmt"
"regexp"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
// validateRef checks that a Ref uses exactly one addressing mode and that
// the mode is well-formed. Three mutually exclusive cases are handled:
//
//   - resolver-based (a resolver and/or resolver params are set): params
//     require the beta API fields and a resolver, and may not be combined
//     with a name; with a resolver, a non-empty name must be URL-like, which
//     in turn requires the enable-concise-resolver-syntax feature flag.
//   - name-only: a URL-like name requires both the
//     enable-concise-resolver-syntax feature flag and a resolver; a
//     non-URL-like name must be a valid k8s qualified name.
//   - nothing set: a missing-field error for "name" is returned.
//
// Errors are accumulated (not short-circuited), so callers receive every
// violation at once; the accumulation order is part of the observable
// behavior relied on by tests.
func validateRef(ctx context.Context, refName string, refResolver ResolverName, refParams Params) (errs *apis.FieldError) {
	switch {
	case refResolver != "" || refParams != nil:
		if refParams != nil {
			// Resolver params are a beta feature.
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
			if refName != "" {
				errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
			}
			if refResolver == "" {
				errs = errs.Also(apis.ErrMissingField("resolver"))
			}
			errs = errs.Also(ValidateParameters(ctx, refParams))
		}
		if refResolver != "" {
			// The resolver field itself is a beta feature.
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
			if refName != "" {
				// make sure that the name is url-like.
				err := RefNameLikeUrl(refName)
				if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
					// If name is url-like then concise resolver syntax must be enabled
					errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
				}
				if err != nil {
					errs = errs.Also(apis.ErrInvalidValue(err, "name"))
				}
			}
		}
	case refName != "":
		// ref name can be a Url-like format.
		if err := RefNameLikeUrl(refName); err == nil {
			// If name is url-like then concise resolver syntax must be enabled
			if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
			}
			// In stage1 of concise remote resolvers syntax, this is a required field.
			// TODO: remove this check when implementing stage 2 where this is optional.
			if refResolver == "" {
				errs = errs.Also(apis.ErrMissingField("resolver"))
			}
			// Or, it must be a valid k8s name
		} else {
			// ref name must be a valid k8s name
			if errSlice := validation.IsQualifiedName(refName); len(errSlice) != 0 {
				errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
			}
		}
	default:
		errs = errs.Also(apis.ErrMissingField("name"))
	}
	return errs
}
// Validate ensures that a supplied Ref field is populated
// correctly. No errors are returned for a nil Ref.
func (ref *Ref) Validate(ctx context.Context) *apis.FieldError {
	if ref != nil {
		return validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
	}
	return nil
}
// refSchemeRegex matches a URI scheme prefix such as "https://" or
// "git+ssh://" (one or more word characters or hyphens, a colon, and at
// least one slash). It is compiled once at package init instead of on every
// call — regexp.MustCompile in a hot validation path was pure overhead.
var refSchemeRegex = regexp.MustCompile(`[\w-]+:\/\/*`)

// RefNameLikeUrl checks if the name is url parsable and returns an error if it isn't.
func RefNameLikeUrl(name string) error {
	if !refSchemeRegex.MatchString(name) {
		return errors.New("invalid URI for request")
	}
	return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"knative.dev/pkg/apis"
)
// Compile-time assertion that *CustomRun satisfies knative's
// apis.Defaultable interface.
var _ apis.Defaultable = (*CustomRun)(nil)
// SetDefaults implements apis.Defaultable
//
// It stamps the CustomRun's ObjectMeta as the parent context and marks the
// spec scope before delegating to the spec's own defaulting.
func (r *CustomRun) SetDefaults(ctx context.Context) {
	parentCtx := apis.WithinParent(ctx, r.ObjectMeta)
	r.Spec.SetDefaults(apis.WithinSpec(parentCtx))
}
// SetDefaults implements apis.Defaultable
//
// An explicitly-set ServiceAccountName is never overwritten; otherwise the
// cluster-wide default service account (when non-empty) is applied.
func (rs *CustomRunSpec) SetDefaults(ctx context.Context) {
	if rs.ServiceAccountName != "" {
		return
	}
	if defaultSA := config.FromContextOrDefaults(ctx).Defaults.DefaultServiceAccount; defaultSA != "" {
		rs.ServiceAccountName = defaultSA
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"time"
apisconfig "github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// EmbeddedCustomRunSpec allows custom task definitions to be embedded
// directly in a CustomRunSpec rather than referenced by a TaskRef.
type EmbeddedCustomRunSpec struct {
	// TypeMeta identifies the apiVersion/kind of the embedded custom task.
	runtime.TypeMeta `json:",inline"`
	// Metadata carries labels/annotations for the embedded custom task.
	// +optional
	Metadata PipelineTaskMetadata `json:"metadata,omitempty"`
	// Spec is a specification of a custom task
	// (kept raw; interpreted by the custom task's own controller)
	// +optional
	Spec runtime.RawExtension `json:"spec,omitempty"`
}
// CustomRunSpec defines the desired state of CustomRun
type CustomRunSpec struct {
	// CustomRef is a reference (apiVersion/kind/name) to the custom task to run.
	// Exactly one of CustomRef or CustomSpec must be set (enforced in Validate).
	// +optional
	CustomRef *TaskRef `json:"customRef,omitempty"`
	// Spec is a specification of a custom task
	// +optional
	CustomSpec *EmbeddedCustomRunSpec `json:"customSpec,omitempty"`
	// Params are the input parameters supplied to the custom task.
	// +optional
	Params Params `json:"params,omitempty"`
	// Used for cancelling a customrun (and maybe more later on)
	// +optional
	Status CustomRunSpecStatus `json:"status,omitempty"`
	// Status message for cancellation.
	// +optional
	StatusMessage CustomRunSpecStatusMessage `json:"statusMessage,omitempty"`
	// Used for propagating retries count to custom tasks
	// +optional
	Retries int `json:"retries,omitempty"`
	// ServiceAccountName is the service account the custom task runs as;
	// defaulted from cluster config in SetDefaults when empty.
	// NOTE(review): the JSON tag has no omitempty, so an empty value still
	// serializes — presumably intentional; confirm before changing.
	// +optional
	ServiceAccountName string `json:"serviceAccountName"`
	// Time after which the custom-task times out.
	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
	// Workspaces is a list of WorkspaceBindings from volumes to workspaces.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceBinding `json:"workspaces,omitempty"`
}
// CustomRunSpecStatus defines the taskrun spec status the user can provide
// (written to spec.status to request a state change such as cancellation).
type CustomRunSpecStatus string
const (
	// CustomRunSpecStatusCancelled indicates that the user wants to cancel the run,
	// if not already cancelled or terminated
	CustomRunSpecStatusCancelled CustomRunSpecStatus = "RunCancelled"
)
// CustomRunSpecStatusMessage defines human readable status messages for the TaskRun.
// It accompanies CustomRunSpecStatus to explain why the status was set.
type CustomRunSpecStatusMessage string
const (
	// CustomRunCancelledByPipelineMsg indicates that the PipelineRun of which part this CustomRun was
	// has been cancelled.
	CustomRunCancelledByPipelineMsg CustomRunSpecStatusMessage = "CustomRun cancelled as the PipelineRun it belongs to has been cancelled."
	// CustomRunCancelledByPipelineTimeoutMsg indicates that the Run was cancelled because the PipelineRun running it timed out.
	CustomRunCancelledByPipelineTimeoutMsg CustomRunSpecStatusMessage = "CustomRun cancelled as the PipelineRun it belongs to has timed out."
)
// GetParam gets the Param from the CustomRunSpec with the given name
// TODO(jasonhall): Move this to a Params type so other code can use it?
func (rs CustomRunSpec) GetParam(name string) *Param {
	for i := range rs.Params {
		if rs.Params[i].Name != name {
			continue
		}
		// Return the address of a copy, matching the original's behavior of
		// taking the address of the range variable instead of aliasing the
		// slice element.
		found := rs.Params[i]
		return &found
	}
	return nil
}
// CustomRunReason is an enum used to store all Run reason for the Succeeded condition that are controlled by the CustomRun itself.
type CustomRunReason string

const (
	// CustomRunReasonStarted is the reason set when the CustomRun has just started.
	CustomRunReasonStarted CustomRunReason = "Started"
	// CustomRunReasonRunning is the reason set when the CustomRun is running.
	CustomRunReasonRunning CustomRunReason = "Running"
	// CustomRunReasonSuccessful is the reason set when the CustomRun completed successfully.
	CustomRunReasonSuccessful CustomRunReason = "Succeeded"
	// CustomRunReasonFailed is the reason set when the CustomRun completed with a failure.
	CustomRunReasonFailed CustomRunReason = "Failed"
	// CustomRunReasonCancelled must be used in the Condition Reason to indicate that a CustomRun was cancelled.
	CustomRunReasonCancelled CustomRunReason = "CustomRunCancelled"
	// CustomRunReasonTimedOut must be used in the Condition Reason to indicate that a CustomRun was timed out.
	CustomRunReasonTimedOut CustomRunReason = "CustomRunTimedOut"
	// CustomRunReasonWorkspaceNotSupported can be used in the Condition Reason to indicate that the
	// CustomRun contains a workspace which is not supported by this custom task.
	CustomRunReasonWorkspaceNotSupported CustomRunReason = "CustomRunWorkspaceNotSupported"
)

// String returns the reason as a plain string, for logging and comparison.
func (t CustomRunReason) String() string {
	reason := string(t)
	return reason
}
// CustomRunStatus defines the observed state of CustomRun.
// It is a type alias so the schema is shared with the run/v1beta1 package.
type CustomRunStatus = runv1beta1.CustomRunStatus
// customrunCondSet is the batch condition set used to manage the Succeeded condition.
var customrunCondSet = apis.NewBatchConditionSet()
// GetConditionSet retrieves the condition set for this resource. Implements
// the KRShaped interface.
func (r *CustomRun) GetConditionSet() apis.ConditionSet { return customrunCondSet }
// GetStatus retrieves the status of the CustomRun. Implements the KRShaped
// interface.
func (r *CustomRun) GetStatus() *duckv1.Status { return &r.Status.Status }
// CustomRunStatusFields holds the fields of CustomRun's status. This is defined
// separately and inlined so that other types can readily consume these fields
// via duck typing.
type CustomRunStatusFields = runv1beta1.CustomRunStatusFields
// CustomRunResult used to describe the results of a task
type CustomRunResult = runv1beta1.CustomRunResult
// +genclient
// +genreconciler
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CustomRun represents a single execution of a Custom Task.
//
// +k8s:openapi-gen=true
type CustomRun struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object metadata.
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the desired state of the CustomRun.
	// +optional
	Spec CustomRunSpec `json:"spec,omitempty"`
	// Status communicates the observed state of the CustomRun.
	// +optional
	Status CustomRunStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CustomRunList contains a list of CustomRun
type CustomRunList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the collection of CustomRuns in this list.
	Items []CustomRun `json:"items"`
}
// GetStatusCondition returns the task run status as a ConditionAccessor
// so callers can read conditions without knowing the concrete status type.
func (r *CustomRun) GetStatusCondition() apis.ConditionAccessor {
	return &r.Status
}
// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*CustomRun) GetGroupVersionKind() schema.GroupVersionKind {
	// The Kind string comes from the shared controller-name constant.
	return SchemeGroupVersion.WithKind(pipeline.CustomRunControllerName)
}
// HasPipelineRunOwnerReference returns true of CustomRun has
// owner reference of type PipelineRun
func (r *CustomRun) HasPipelineRunOwnerReference() bool {
	owners := r.GetOwnerReferences()
	for i := range owners {
		if owners[i].Kind == pipeline.PipelineRunControllerName {
			return true
		}
	}
	return false
}
// IsCancelled returns true if the CustomRun's spec status is set to Cancelled state
func (r *CustomRun) IsCancelled() bool {
	return r.Spec.Status == CustomRunSpecStatusCancelled
}
// IsDone returns true if the CustomRun's status indicates that it is done.
// "Done" means the Succeeded condition is no longer Unknown (True or False).
func (r *CustomRun) IsDone() bool {
	return !r.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
}
// HasStarted function check whether taskrun has valid start time set in its status
func (r *CustomRun) HasStarted() bool {
	return r.Status.StartTime != nil && !r.Status.StartTime.IsZero()
}
// IsSuccessful returns true if the CustomRun's status indicates that it has succeeded.
// NOTE(review): this and IsFailure guard against a nil receiver while the
// predicates above do not — presumably some callers hold a possibly-nil
// *CustomRun; confirm before unifying the nil handling.
func (r *CustomRun) IsSuccessful() bool {
	return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsTrue()
}
// IsFailure returns true if the CustomRun's status indicates that it has failed.
func (r *CustomRun) IsFailure() bool {
	return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsFalse()
}
// GetCustomRunKey return the customrun's key for timeout handler map
func (r *CustomRun) GetCustomRunKey() string {
	// The address of the pointer is a threadsafe unique identifier for the customrun.
	// The constant "CustomRun" prefix is baked into the format string instead of
	// being routed through a redundant %s verb; output is identical.
	return fmt.Sprintf("CustomRun/%p", r)
}
// HasTimedOut returns true if the CustomRun's running time is beyond the allowed timeout
func (r *CustomRun) HasTimedOut(c clock.PassiveClock) bool {
	// A run that never started cannot have timed out.
	if r.Status.StartTime == nil || r.Status.StartTime.IsZero() {
		return false
	}
	timeout := r.GetTimeout()
	// If timeout is set to 0 or defaulted to 0, there is no timeout.
	if timeout == apisconfig.NoTimeoutDuration {
		return false
	}
	// Renamed from "runtime" to avoid shadowing the imported
	// k8s.io/apimachinery/pkg/runtime package inside this function.
	elapsed := c.Since(r.Status.StartTime.Time)
	return elapsed > timeout
}
// GetTimeout returns the timeout for this customrun, or the default if not configured
func (r *CustomRun) GetTimeout() time.Duration {
	if t := r.Spec.Timeout; t != nil {
		return t.Duration
	}
	// No timeout configured: fall back to the platform default.
	return apisconfig.DefaultTimeoutMinutes * time.Minute
}
// GetRetryCount returns the number of times this CustomRun has already been retried,
// derived from the length of the recorded RetriesStatus.
func (r *CustomRun) GetRetryCount() int {
	return len(r.Status.RetriesStatus)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/validate"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
// Compile-time assertions that *CustomRun satisfies the webhook interfaces.
var _ apis.Validatable = (*CustomRun)(nil)
var _ resourcesemantics.VerbLimited = (*CustomRun)(nil)
// SupportedVerbs returns the operations that validation should be called for
// (deletes are excluded, so validation never blocks deletion).
func (r *CustomRun) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{admissionregistrationv1.Create, admissionregistrationv1.Update}
}
// Validate customRun
func (r *CustomRun) Validate(ctx context.Context) *apis.FieldError {
	// Metadata problems are reported before any spec validation runs.
	metaErr := validate.ObjectMetadata(r.GetObjectMeta()).ViaField("metadata")
	if metaErr != nil {
		return metaErr
	}
	return r.Spec.Validate(ctx)
}
// Validate CustomRun spec
func (rs *CustomRunSpec) Validate(ctx context.Context) *apis.FieldError {
	// this covers the case rs.customRef == nil && rs.customSpec == nil
	// with every other field also at its zero value.
	if equality.Semantic.DeepEqual(rs, &CustomRunSpec{}) {
		return apis.ErrMissingField("spec")
	}
	// Exactly one of customRef / customSpec must be provided.
	switch {
	case rs.CustomRef != nil && rs.CustomSpec != nil:
		return apis.ErrMultipleOneOf("spec.customRef", "spec.customSpec")
	case rs.CustomRef == nil && rs.CustomSpec == nil:
		return apis.ErrMissingOneOf("spec.customRef", "spec.customSpec")
	}
	if ref := rs.CustomRef; ref != nil {
		if ref.APIVersion == "" {
			return apis.ErrMissingField("spec.customRef.apiVersion")
		}
		if ref.Kind == "" {
			return apis.ErrMissingField("spec.customRef.kind")
		}
	}
	if embedded := rs.CustomSpec; embedded != nil {
		if embedded.APIVersion == "" {
			return apis.ErrMissingField("spec.customSpec.apiVersion")
		}
		if embedded.Kind == "" {
			return apis.ErrMissingField("spec.customSpec.kind")
		}
	}
	// A status message is only meaningful alongside a status.
	if rs.Status == "" && rs.StatusMessage != "" {
		return apis.ErrInvalidValue(fmt.Sprintf("statusMessage should not be set if status is not set, but it is currently set to %s", rs.StatusMessage), "statusMessage")
	}
	if err := ValidateParameters(ctx, rs.Params).ViaField("spec.params"); err != nil {
		return err
	}
	return ValidateWorkspaceBindings(ctx, rs.Workspaces).ViaField("spec.workspaces")
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"maps"
"sort"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
)
// Matrix is used to fan out Tasks in a Pipeline
type Matrix struct {
	// Params is a list of parameters used to fan out the pipelineTask
	// Params takes only `Parameters` of type `"array"`
	// Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`.
	// The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.
	Params Params `json:"params,omitempty"`

	// Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.
	// +optional
	Include IncludeParamsList `json:"include,omitempty"`
}
// IncludeParamsList is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.
// +listType=atomic
type IncludeParamsList []IncludeParams
// IncludeParams allows passing in a specific combinations of Parameters into the Matrix.
type IncludeParams struct {
	// Name the specified combination
	Name string `json:"name,omitempty"`
	// Params takes only `Parameters` of type `"string"`
	// The names of the `params` must match the names of the `params` in the underlying `Task`
	Params Params `json:"params,omitempty"`
}
// Combination is a map, mainly defined to hold a single combination from a Matrix with key as param.Name and value as param.Value
// (the string value of the parameter for this particular combination).
type Combination map[string]string
// Combinations is a Combination list
type Combinations []Combination
// FanOut returns an list of params that represent combinations
func (m *Matrix) FanOut() []Params {
	includeCombinations := m.getIncludeCombinations()
	// With only include parameters (no matrix params), the explicit
	// combinations are the whole answer.
	if m.HasInclude() && !m.HasParams() {
		return includeCombinations.toParams()
	}
	// Build the cross-product of the Matrix Parameters...
	var combinations Combinations
	for _, matrixParam := range m.Params {
		combinations = combinations.fanOutMatrixParams(matrixParam)
	}
	// ...then fold in the include combinations: overwrite matching entries
	// first, and append any combinations that are entirely new.
	combinations.overwriteCombinations(includeCombinations)
	return combinations.addNewCombinations(includeCombinations).toParams()
}
// overwriteCombinations replaces any missing include params in the initial
// matrix params combinations by overwriting the initial combinations with the
// include combinations
func (cs Combinations) overwriteCombinations(ics Combinations) {
	for _, combination := range cs {
		for _, ic := range ics {
			if !combination.contains(ic) {
				continue
			}
			// This combination matches the include entry: copy the include
			// name/value pairs over the existing ones in place.
			for name, value := range ic {
				combination[name] = value
			}
		}
	}
}
// addNewCombinations creates a new combination for any include parameter
// values that are missing entirely from the initial combinations and
// returns all combinations
func (cs Combinations) addNewCombinations(ics Combinations) Combinations {
	result := cs
	for _, ic := range ics {
		// Note: the check runs against the growing result so an include entry
		// appended earlier is visible to later checks, as in the original.
		if result.shouldAddNewCombination(ic) {
			result = append(result, ic)
		}
	}
	return result
}
// contains returns true if the include parameter name and value exists in combinations
func (c Combination) contains(includeCombination Combination) bool {
	// A combination matches when none of the include's keys are present with a
	// different value; keys absent from c do not disqualify a match.
	// Uses a single map lookup per key instead of the previous lookup-then-index.
	for name, val := range includeCombination {
		if existing, ok := c[name]; ok && existing != val {
			return false
		}
	}
	return true
}
// shouldAddNewCombination returns true if the include parameter name exists but the value is
// missing from combinations
func (cs Combinations) shouldAddNewCombination(includeCombination map[string]string) bool {
	if len(includeCombination) == 0 {
		return false
	}
	for _, paramCombination := range cs {
		for name, val := range includeCombination {
			// Single map lookup (value + ok) instead of the previous
			// existence check followed by a second index expression.
			if existing, ok := paramCombination[name]; ok && existing == val {
				// An existing combination already carries this name/value
				// pair, so no new combination needs to be added.
				return false
			}
		}
	}
	return true
}
// toParams transforms Combinations from a slice of map[string]string to a slice of Params
// such that, these combinations can be directly consumed in creating taskRun/run object
func (cs Combinations) toParams() []Params {
	listOfParams := make([]Params, len(cs))
	for i, combination := range cs {
		// Keys are emitted in sorted order for deterministic output.
		keys, _ := combination.sortCombination()
		var params Params
		for _, name := range keys {
			params = append(params, Param{
				Name:  name,
				Value: ParamValue{Type: ParamTypeString, StringVal: combination[name]},
			})
		}
		listOfParams[i] = params
	}
	return listOfParams
}
// fanOutMatrixParams generates new combinations based on Matrix Parameters.
func (cs Combinations) fanOutMatrixParams(param Param) Combinations {
	if len(cs) > 0 {
		return cs.distribute(param)
	}
	// The first parameter seeds the initial set of combinations.
	return initializeCombinations(param)
}
// getIncludeCombinations generates combinations based on Matrix Include Parameters
func (m *Matrix) getIncludeCombinations() Combinations {
	var combinations Combinations
	for _, include := range m.Include {
		// Each include entry becomes exactly one combination.
		combination := make(Combination, len(include.Params))
		for _, p := range include.Params {
			combination[p.Name] = p.Value.StringVal
		}
		combinations = append(combinations, combination)
	}
	return combinations
}
// distribute generates a new Combination of Parameters by adding a new Parameter to an existing list of Combinations.
func (cs Combinations) distribute(param Param) Combinations {
	var expandedCombinations Combinations
	for _, value := range param.Value.ArrayVal {
		for _, combination := range cs {
			newCombination := make(Combination, len(combination)+1)
			maps.Copy(newCombination, combination)
			newCombination[param.Name] = value
			// Maps carry no ordering, so the previous round-trip through
			// sortCombination only produced an identical copy of
			// newCombination; append it directly and skip that allocation.
			expandedCombinations = append(expandedCombinations, newCombination)
		}
	}
	return expandedCombinations
}
// initializeCombinations generates a new Combination based on the first Parameter in the Matrix.
func initializeCombinations(param Param) Combinations {
	var combinations Combinations
	// One single-entry combination per array value of the first parameter.
	for _, v := range param.Value.ArrayVal {
		combinations = append(combinations, Combination{param.Name: v})
	}
	return combinations
}
// sortCombination sorts the given Combination based on the Parameter names to produce a deterministic ordering
func (c Combination) sortCombination() ([]string, Combination) {
	sortedCombination := make(Combination, len(c))
	order := make([]string, 0, len(c))
	for key := range c {
		order = append(order, key)
	}
	// The previous sort.Slice used "<=" as the less function, which is not a
	// strict ordering as sort requires; it only behaved because map keys are
	// unique. sort.Strings gives the same order with a correct comparator.
	sort.Strings(order)
	for _, key := range order {
		sortedCombination[key] = c[key]
	}
	return order, sortedCombination
}
// CountCombinations returns the count of Combinations of Parameters generated from the Matrix in PipelineTask.
func (m *Matrix) CountCombinations() int {
	// Product of the matrix parameter value counts, plus any additional
	// combinations contributed by Matrix Include Parameters.
	return m.countGeneratedCombinationsFromParams() + m.countNewCombinationsFromInclude()
}
// countGeneratedCombinationsFromParams returns the count of Combinations of Parameters generated from the Matrix
// Parameters
func (m *Matrix) countGeneratedCombinationsFromParams() int {
	if !m.HasParams() {
		return 0
	}
	// The cross-product size is the product of each param's value count.
	product := 1
	for i := range m.Params {
		product *= len(m.Params[i].Value.ArrayVal)
	}
	return product
}
// countNewCombinationsFromInclude returns the count of Combinations of Parameters generated from the Matrix
// Include Parameters
func (m *Matrix) countNewCombinationsFromInclude() int {
	if !m.HasInclude() {
		return 0
	}
	// With no matrix params, every include entry is its own new combination.
	if !m.HasParams() {
		return len(m.Include)
	}
	count := 0
	// Map of matrix param name -> its array of values.
	matrixParamMap := m.Params.extractParamMapArrVals()
	for _, include := range m.Include {
		for _, param := range include.Params {
			if val, exist := matrixParamMap[param.Name]; exist {
				// If the Matrix Include param values does not exist, a new Combination will be generated
				if !slices.Contains(val, param.Value.StringVal) {
					count++
				} else {
					// NOTE(review): break fires only when a value matches, so
					// later params of the same include can still increment the
					// count — presumably intentional to mirror FanOut; confirm.
					break
				}
			}
		}
	}
	return count
}
// HasInclude returns true if the Matrix has Include Parameters
func (m *Matrix) HasInclude() bool {
	// len of a nil slice is 0, so the previous explicit nil check was redundant.
	return m != nil && len(m.Include) > 0
}
// HasParams returns true if the Matrix has Parameters
func (m *Matrix) HasParams() bool {
	// len of a nil slice is 0, so the previous explicit nil check was redundant.
	return m != nil && len(m.Params) > 0
}
// GetAllParams returns a list of all Matrix Parameters
func (m *Matrix) GetAllParams() Params {
	var all Params
	if m.HasParams() {
		all = append(all, m.Params...)
	}
	if !m.HasInclude() {
		return all
	}
	// Include parameters are appended after the matrix parameters.
	for i := range m.Include {
		all = append(all, m.Include[i].Params...)
	}
	return all
}
// validateCombinationsCount rejects a Matrix whose fan-out exceeds the
// cluster-configured maximum combinations count.
func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.FieldError) {
	count := m.CountCombinations()
	limit := config.FromContextOrDefaults(ctx).Defaults.DefaultMaxMatrixCombinationsCount
	if count > limit {
		errs = errs.Also(apis.ErrOutOfBoundsValue(count, 0, limit, "matrix"))
	}
	return errs
}
// validateUniqueParams validates Matrix.Params for a unique list of params
// and a unique list of params in each Matrix.Include.Params specification
func (m *Matrix) validateUniqueParams() (errs *apis.FieldError) {
	if m == nil {
		return nil
	}
	// Ranging over an empty Include slice is a no-op, matching the original
	// HasInclude guard.
	for i, include := range m.Include {
		errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i)))
	}
	if m.HasParams() {
		errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params"))
	}
	return errs
}
// validatePipelineParametersVariablesInMatrixParameters validates all pipeline parameter variables including Matrix.Params and Matrix.Include.Params
// that may contain the reference(s) to other params to make sure those references are used appropriately.
func (m *Matrix) validatePipelineParametersVariablesInMatrixParameters(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	if m.HasInclude() {
		for _, include := range m.Include {
			for idx, param := range include.Params {
				stringElement := param.Value.StringVal
				// Matrix Include Params must be of type string
				// NOTE(review): ViaField here receives two arguments
				// ("matrix.include.params", "") — presumably the empty string
				// is intentional padding for the field path; confirm.
				errs = errs.Also(validateStringVariable(stringElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("", idx).ViaField("matrix.include.params", ""))
			}
		}
	}
	if m.HasParams() {
		for _, param := range m.Params {
			// Each array element of each matrix param is validated separately
			// so errors carry the param name and element index.
			for idx, arrayElement := range param.Value.ArrayVal {
				// Matrix Params must be of type array
				errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix.params", param.Name))
			}
		}
	}
	return errs
}
// validateParameterInOneOfMatrixOrParams rejects parameters that appear both
// in the Matrix (params or include) and in the regular params list.
func (m *Matrix) validateParameterInOneOfMatrixOrParams(params Params) (errs *apis.FieldError) {
	names := m.GetAllParams().ExtractNames()
	for _, p := range params {
		if !names.Has(p.Name) {
			continue
		}
		errs = errs.Also(apis.ErrMultipleOneOf("matrix["+p.Name+"]", "params["+p.Name+"]"))
	}
	return errs
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"encoding/json"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/strategicpatch"
)
// mergeData is used to store the intermediate data needed to merge an object
// with a template. It's provided to avoid repeatedly re-serializing the template.
// +k8s:openapi-gen=false
type mergeData struct {
	// emptyJSON is the serialized zero value, used as the "original" in the three-way merge.
	emptyJSON []byte
	// templateJSON is the serialized template, used as the "current" in the three-way merge.
	templateJSON []byte
	// patchSchema drives generation and application of the strategic merge patch.
	patchSchema strategicpatch.PatchMetaFromStruct
}
// MergeStepsWithStepTemplate takes a possibly nil container template and a
// list of steps, merging each of the steps with the container template, if
// it's not nil, and returning the resulting list.
//
// The merge is a strategic three-way merge on the K8s Container form of the
// template and each step; steps are rewritten in place in the passed slice.
func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, error) {
	if template == nil {
		return steps, nil
	}
	// Serialize the template once up front; it is reused for every step below.
	md, err := getMergeData(template.ToK8sContainer(), &corev1.Container{})
	if err != nil {
		return nil, err
	}
	for i, s := range steps {
		// If the stepaction has not been fetched yet then do not merge.
		// Skip over to the next one
		if s.Ref != nil {
			continue
		}
		merged := corev1.Container{}
		err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged)
		if err != nil {
			return nil, err
		}
		// If the container's args is nil, reset it to empty instead
		// (the step explicitly set Args, so the merged result must not drop it to nil)
		if merged.Args == nil && s.Args != nil {
			merged.Args = []string{}
		}
		amendConflictingContainerFields(&merged, s)
		// Pass through original step Script, for later conversion.
		newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, StdoutConfig: s.StdoutConfig, StderrConfig: s.StderrConfig, When: s.When}
		newStep.SetContainerFields(merged)
		steps[i] = newStep
	}
	return steps, nil
}
// MergeStepsWithOverrides takes a possibly nil list of overrides and a
// list of steps, merging each of the steps with the overrides' resource requirements, if
// it's not nil, and returning the resulting list.
func MergeStepsWithOverrides(steps []Step, overrides []TaskRunStepOverride) ([]Step, error) {
	// Fast path when there are no overrides, consistent with
	// MergeSidecarsWithOverrides; behavior is unchanged since no step could
	// match an empty override map anyway.
	if len(overrides) == 0 {
		return steps, nil
	}
	stepNameToOverride := make(map[string]TaskRunStepOverride, len(overrides))
	for _, o := range overrides {
		stepNameToOverride[o.Name] = o
	}
	for i, s := range steps {
		o, found := stepNameToOverride[s.Name]
		if !found {
			continue
		}
		// Merge the override's resource requirements on top of the step's.
		merged := v1.ResourceRequirements{}
		err := mergeObjWithTemplate(&s.Resources, &o.Resources, &merged)
		if err != nil {
			return nil, err
		}
		steps[i].Resources = merged
	}
	return steps, nil
}
// MergeSidecarsWithOverrides takes a possibly nil list of overrides and a
// list of sidecars, merging each of the sidecars with the overrides' resource requirements, if
// it's not nil, and returning the resulting list.
func MergeSidecarsWithOverrides(sidecars []Sidecar, overrides []TaskRunSidecarOverride) ([]Sidecar, error) {
	if len(overrides) == 0 {
		return sidecars, nil
	}
	// Index the overrides by sidecar name for O(1) lookup.
	byName := make(map[string]TaskRunSidecarOverride, len(overrides))
	for _, override := range overrides {
		byName[override.Name] = override
	}
	for i := range sidecars {
		override, ok := byName[sidecars[i].Name]
		if !ok {
			continue
		}
		// Merge the override's resource requirements on top of the sidecar's.
		var merged v1.ResourceRequirements
		if err := mergeObjWithTemplate(&sidecars[i].Resources, &override.Resources, &merged); err != nil {
			return nil, err
		}
		sidecars[i].Resources = merged
	}
	return sidecars, nil
}
// mergeObjWithTemplate merges obj with template and updates out to reflect the merged result.
// template, obj, and out should point to the same type. out points to the zero value of that type.
// This is the one-shot convenience form; use getMergeData + mergeObjWithTemplateBytes
// directly when merging many objects against the same template.
func mergeObjWithTemplate(template, obj, out interface{}) error {
	md, err := getMergeData(template, out)
	if err != nil {
		return err
	}
	return mergeObjWithTemplateBytes(md, obj, out)
}
// getMergeData serializes the template and empty object to get the intermediate results necessary for
// merging an object of the same type with this template.
// This function is provided to avoid repeatedly serializing an identical template.
func getMergeData(template, empty interface{}) (*mergeData, error) {
	// The merge patch is computed from JSON, so serialize the template first.
	tmplJSON, err := json.Marshal(template)
	if err != nil {
		return nil, err
	}
	// The three-way merge needs an "original"; the empty value plays that role.
	zeroJSON, err := json.Marshal(empty)
	if err != nil {
		return nil, err
	}
	// Patch metadata drives both generating and applying the merge patch.
	schema, err := strategicpatch.NewPatchMetaFromStruct(template)
	if err != nil {
		return nil, err
	}
	return &mergeData{templateJSON: tmplJSON, emptyJSON: zeroJSON, patchSchema: schema}, nil
}
// mergeObjWithTemplateBytes merges obj with md's template JSON and updates out to reflect the merged result.
// out is a pointer to the zero value of obj's type.
// This function is provided to avoid repeatedly serializing an identical template.
func mergeObjWithTemplateBytes(md *mergeData, obj, out interface{}) error {
	// Marshal the object to JSON
	objAsJSON, err := json.Marshal(obj)
	if err != nil {
		return err
	}
	// Create a merge patch, with the empty JSON as the original, the object JSON as the modified, and the template
	// JSON as the current - this lets us do a deep merge of the template and object, with awareness of
	// the "patchMerge" tags.
	patch, err := strategicpatch.CreateThreeWayMergePatch(md.emptyJSON, objAsJSON, md.templateJSON, md.patchSchema, true)
	if err != nil {
		return err
	}
	// Actually apply the merge patch to the template JSON.
	mergedAsJSON, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(md.templateJSON, patch, md.patchSchema)
	if err != nil {
		return err
	}
	// Unmarshal the merged JSON to a pointer, and return it.
	return json.Unmarshal(mergedAsJSON, out)
}
// amendConflictingContainerFields amends conflicting container fields after merge, and overrides conflicting fields
// by fields in step.
func amendConflictingContainerFields(container *corev1.Container, step Step) {
	if container == nil || len(step.Env) == 0 {
		return
	}
	// Index the step's env vars by name for quick replacement lookups.
	stepEnvByName := make(map[string]corev1.EnvVar, len(step.Env))
	for _, envVar := range step.Env {
		stepEnvByName[envVar.Name] = envVar
	}
	for i := range container.Env {
		mergedVar := container.Env[i]
		// Only entries that ended up with both Value and ValueFrom set after
		// the merge are conflicting and need amending.
		if mergedVar.ValueFrom == nil || len(mergedVar.Value) == 0 {
			continue
		}
		if stepVar, ok := stepEnvByName[mergedVar.Name]; ok {
			container.Env[i] = stepVar
		}
	}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by openapi-gen. DO NOT EDIT.
package v1beta1
import (
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
// GetOpenAPIDefinitions returns the OpenAPI definitions for every Tekton
// pipeline/pod/resolution v1beta1 type known to this package, keyed by the
// type's fully qualified Go name. ref is the callback used by each schema
// builder to resolve references to other types.
// Code generated by openapi-gen — regenerate rather than editing by hand.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact": schema_pkg_apis_pipeline_v1beta1_Artifact(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArtifactValue": schema_pkg_apis_pipeline_v1beta1_ArtifactValue(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifacts": schema_pkg_apis_pipeline_v1beta1_Artifacts(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference": schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery": schema_pkg_apis_pipeline_v1beta1_CloudEventDelivery(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDeliveryState": schema_pkg_apis_pipeline_v1beta1_CloudEventDeliveryState(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource": schema_pkg_apis_pipeline_v1beta1_ConfigSource(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRun": schema_pkg_apis_pipeline_v1beta1_CustomRun(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunList": schema_pkg_apis_pipeline_v1beta1_CustomRunList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunSpec": schema_pkg_apis_pipeline_v1beta1_CustomRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedCustomRunSpec": schema_pkg_apis_pipeline_v1beta1_EmbeddedCustomRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask": schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.IncludeParams": schema_pkg_apis_pipeline_v1beta1_IncludeParams(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.InternalTaskModifier": schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Matrix": schema_pkg_apis_pipeline_v1beta1_Matrix(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param": schema_pkg_apis_pipeline_v1beta1_Param(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec": schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue": schema_pkg_apis_pipeline_v1beta1_ParamValue(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Pipeline": schema_pkg_apis_pipeline_v1beta1_Pipeline(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineDeclaredResource": schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineList": schema_pkg_apis_pipeline_v1beta1_PipelineList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef": schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding": schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef": schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult": schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun": schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunList": schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult": schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatusFields": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec": schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask": schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata": schema_pkg_apis_pipeline_v1beta1_PipelineTaskMetadata(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskOutputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskParam": schema_pkg_apis_pipeline_v1beta1_PipelineTaskParam(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources": schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRun": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRun(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec": schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance": schema_pkg_apis_pipeline_v1beta1_Provenance(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Ref": schema_pkg_apis_pipeline_v1beta1_Ref(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource": schema_pkg_apis_pipeline_v1beta1_RefSource(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverRef": schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResultRef": schema_pkg_apis_pipeline_v1beta1_ResultRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar": schema_pkg_apis_pipeline_v1beta1_Sidecar(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState": schema_pkg_apis_pipeline_v1beta1_SidecarState(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask": schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step": schema_pkg_apis_pipeline_v1beta1_Step(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepAction": schema_pkg_apis_pipeline_v1beta1_StepAction(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionList": schema_pkg_apis_pipeline_v1beta1_StepActionList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionSpec": schema_pkg_apis_pipeline_v1beta1_StepActionSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig": schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState": schema_pkg_apis_pipeline_v1beta1_StepState(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate": schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task": schema_pkg_apis_pipeline_v1beta1_Task(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskBreakpoints": schema_pkg_apis_pipeline_v1beta1_TaskBreakpoints(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskList": schema_pkg_apis_pipeline_v1beta1_TaskList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef": schema_pkg_apis_pipeline_v1beta1_TaskRef(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource": schema_pkg_apis_pipeline_v1beta1_TaskResource(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding": schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources": schema_pkg_apis_pipeline_v1beta1_TaskResources(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult": schema_pkg_apis_pipeline_v1beta1_TaskResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRun": schema_pkg_apis_pipeline_v1beta1_TaskRun(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug": schema_pkg_apis_pipeline_v1beta1_TaskRunDebug(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunInputs": schema_pkg_apis_pipeline_v1beta1_TaskRunInputs(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunList": schema_pkg_apis_pipeline_v1beta1_TaskRunList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunOutputs": schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources": schema_pkg_apis_pipeline_v1beta1_TaskRunResources(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult": schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunSidecarOverride(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec": schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus": schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatusFields": schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunStepOverride(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec": schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields": schema_pkg_apis_pipeline_v1beta1_TimeoutFields(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression": schema_pkg_apis_pipeline_v1beta1_WhenExpression(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding": schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_WorkspaceDeclaration(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding": schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref),
		"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage": schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage(ref),
		"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequest": schema_pkg_apis_resolution_v1beta1_ResolutionRequest(ref),
		"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestList": schema_pkg_apis_resolution_v1beta1_ResolutionRequestList(ref),
		"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestSpec": schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref),
		"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestStatus": schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref),
		"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestStatusFields": schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref),
	}
}
// schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate builds the OpenAPI
// schema for pod.AffinityAssistantTemplate. Dependencies lists the referenced
// external types resolved through ref.
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "AffinityAssistantTemplate holds pod specific configuration and is a subset of the generic pod Template",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"nodeSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"tolerations": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's tolerations.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Toleration"),
									},
								},
							},
						},
					},
					"imagePullSecrets": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.LocalObjectReference"),
									},
								},
							},
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext sets the security context for the pod",
							Ref:         ref("k8s.io/api/core/v1.PodSecurityContext"),
						},
					},
					"priorityClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
	}
}
// schema_pkg_apis_pipeline_pod_Template builds the OpenAPI schema for
// pod.Template, the pod-level configuration Tekton exposes (node selection,
// env, tolerations, volumes, DNS, scheduling, host networking, etc.).
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_pod_Template(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Template holds pod specific configuration",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"nodeSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables that can be provided to the containers belonging to the pod.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"tolerations": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's tolerations.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Toleration"),
									},
								},
							},
						},
					},
					"affinity": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the pod's scheduling constraints. See Pod.spec.affinity (API version: v1)",
							Ref:         ref("k8s.io/api/core/v1.Affinity"),
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. See Pod.spec.securityContext (API version: v1)",
							Ref:         ref("k8s.io/api/core/v1.PodSecurityContext"),
						},
					},
					"volumes": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge,retainKeys",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes See Pod.spec.volumes (API version: v1)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Volume"),
									},
								},
							},
						},
					},
					"runtimeClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"automountServiceAccountToken": {
						SchemaProps: spec.SchemaProps{
							Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"dnsPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dnsConfig": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
							Ref:         ref("k8s.io/api/core/v1.PodDNSConfig"),
						},
					},
					"enableServiceLinks": {
						SchemaProps: spec.SchemaProps{
							Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"priorityClassName": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"schedulerName": {
						SchemaProps: spec.SchemaProps{
							Description: "SchedulerName specifies the scheduler to be used to dispatch the Pod",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"imagePullSecrets": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.LocalObjectReference"),
									},
								},
							},
						},
					},
					"hostAliases": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.HostAlias"),
									},
								},
							},
						},
					},
					"hostNetwork": {
						SchemaProps: spec.SchemaProps{
							Description: "HostNetwork specifies whether the pod may use the node network namespace",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"topologySpreadConstraints": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "TopologySpreadConstraints controls how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_Artifact builds the OpenAPI schema for
// v1beta1.Artifact (name, values, buildOutput flag).
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStepArtifact represents an artifact produced or used by a step within a task run. It directly uses the Artifact type for its structure.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "The artifact's identifying category name",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"values": {
						SchemaProps: spec.SchemaProps{
							Description: "A collection of values related to the artifact",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArtifactValue"),
									},
								},
							},
						},
					},
					"buildOutput": {
						SchemaProps: spec.SchemaProps{
							Description: "Indicate if the artifact is a build output or a by-product",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArtifactValue"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_ArtifactValue builds the OpenAPI schema for
// v1beta1.ArtifactValue (a digest map plus a uri).
// Code generated by openapi-gen — regenerate rather than editing by hand.
// NOTE(review): the "uri" description below ("Algorithm-specific digests...")
// reads like it belongs to the "digest" field, which has no description —
// possibly swapped doc comments in the Go type; confirm against the
// ArtifactValue struct and fix there before regenerating.
func schema_pkg_apis_pipeline_v1beta1_ArtifactValue(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ArtifactValue represents a specific value or data element within an Artifact.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"digest": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"uri": {
						SchemaProps: spec.SchemaProps{
							Description: "Algorithm-specific digests for verifying the content (e.g., SHA256)",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_Artifacts builds the OpenAPI schema for
// v1beta1.Artifacts (the inputs/outputs artifact collections).
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_Artifacts(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Artifacts represents the collection of input and output artifacts associated with a task run or a similar process. Artifacts in this context are units of data or resources that the process either consumes as input or produces as output.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"inputs": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact"),
									},
								},
							},
						},
					},
					"outputs": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_ChildStatusReference builds the OpenAPI
// schema for v1beta1.ChildStatusReference, the pointer a PipelineRun status
// keeps to each child TaskRun/Run.
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of the TaskRun or Run this is referencing.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"displayName": {
						SchemaProps: spec.SchemaProps{
							Description: "DisplayName is a user-facing name of the pipelineTask that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"pipelineTaskName": {
						SchemaProps: spec.SchemaProps{
							Description: "PipelineTaskName is the name of the PipelineTask this is referencing.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"whenExpressions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_CloudEventDelivery builds the OpenAPI
// schema for v1beta1.CloudEventDelivery (target URI plus delivery state).
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_CloudEventDelivery(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CloudEventDelivery is the target of a cloud event along with the state of delivery.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"target": {
						SchemaProps: spec.SchemaProps{
							Description: "Target points to an addressable",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDeliveryState"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDeliveryState"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_CloudEventDeliveryState builds the OpenAPI
// schema for v1beta1.CloudEventDeliveryState; "message" and "retryCount" are
// required fields.
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_CloudEventDeliveryState(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CloudEventDeliveryState reports the state of a cloud event to be sent.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"condition": {
						SchemaProps: spec.SchemaProps{
							Description: "Current status",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"sentAt": {
						SchemaProps: spec.SchemaProps{
							Description: "SentAt is the time at which the last attempt to send the event was made",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "Error is the text of error (if any)",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"retryCount": {
						SchemaProps: spec.SchemaProps{
							Description: "RetryCount is the number of attempts of sending the cloud event",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
				Required: []string{"message", "retryCount"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_ConfigSource builds the OpenAPI schema for
// v1beta1.ConfigSource (uri, digest map, entryPoint). No external type
// references, so there is no Dependencies list.
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_ConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ConfigSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"uri": {
						SchemaProps: spec.SchemaProps{
							Description: "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"digest": {
						SchemaProps: spec.SchemaProps{
							Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"entryPoint": {
						SchemaProps: spec.SchemaProps{
							Description: "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.10/git-clone.yaml\"",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_CustomRun builds the OpenAPI schema for the
// v1beta1.CustomRun top-level resource (TypeMeta fields, metadata, spec,
// status).
// Code generated by openapi-gen — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_CustomRun(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CustomRun represents a single execution of a Custom Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunSpec", "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_CustomRunList returns the generated OpenAPI
// definition for v1beta1.CustomRunList: standard list envelope (kind,
// apiVersion, ListMeta) with a required "items" array of CustomRun refs.
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_CustomRunList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CustomRunList contains a list of CustomRun",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRun"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_CustomRunSpec returns the generated OpenAPI
// definition for v1beta1.CustomRunSpec: either a reference to a custom task
// (customRef) or an embedded spec (customSpec), plus params, cancellation
// status/message, retries, service account, timeout, and workspace bindings.
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_CustomRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CustomRunSpec defines the desired state of CustomRun",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"customRef": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef"),
},
},
"customSpec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedCustomRunSpec"),
},
},
"params": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
},
},
},
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Used for cancelling a customrun (and maybe more later on)",
Type: []string{"string"},
Format: "",
},
},
"statusMessage": {
SchemaProps: spec.SchemaProps{
Description: "Status message for cancellation.",
Type: []string{"string"},
Format: "",
},
},
"retries": {
SchemaProps: spec.SchemaProps{
Description: "Used for propagating retries count to custom tasks",
Type: []string{"integer"},
Format: "int32",
},
},
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"timeout": {
SchemaProps: spec.SchemaProps{
Description: "Time after which the custom-task times out. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
},
},
"workspaces": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
// Atomic list: replaced wholesale on update, never merged element-wise.
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Workspaces is a list of WorkspaceBindings from volumes to workspaces.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedCustomRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
}
}
// schema_pkg_apis_pipeline_v1beta1_EmbeddedCustomRunSpec returns the generated
// OpenAPI definition for v1beta1.EmbeddedCustomRunSpec: apiVersion/kind,
// task metadata, and an opaque RawExtension spec for the embedded custom task.
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_EmbeddedCustomRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EmbeddedCustomRunSpec allows custom task definitions to be embedded",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiVersion": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
// RawExtension: schemaless payload, validated by the custom-task controller.
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
}
}
// schema_pkg_apis_pipeline_v1beta1_EmbeddedTask returns the generated OpenAPI
// definition for v1beta1.EmbeddedTask: an inline Task inside a PipelineTask,
// carrying both the custom-task fields (apiVersion/kind/spec/metadata) and the
// full TaskSpec surface (params, steps, volumes, stepTemplate, sidecars,
// workspaces, results; "resources" is kept only for backwards compatibility).
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiVersion": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata"),
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources"),
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec"),
},
},
},
},
},
"displayName": {
SchemaProps: spec.SchemaProps{
Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the task that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"steps": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step"),
},
},
},
},
},
"volumes": {
SchemaProps: spec.SchemaProps{
Description: "Volumes is a collection of volumes that are available to mount into the steps of the build. See Pod.spec.volumes (API version: v1)",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/core/v1.Volume"),
},
},
},
},
},
"stepTemplate": {
SchemaProps: spec.SchemaProps{
Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate"),
},
},
"sidecars": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar"),
},
},
},
},
},
"workspaces": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Workspaces are the volumes that this Task requires.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration"),
},
},
},
},
},
"results": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Results are values that this Task can output",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
}
}
// schema_pkg_apis_pipeline_v1beta1_IncludeParams returns the generated OpenAPI
// definition for v1beta1.IncludeParams: a named combination of string-typed
// Params to feed into a Matrix. Generated code — regenerate rather than
// editing by hand.
func schema_pkg_apis_pipeline_v1beta1_IncludeParams(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "IncludeParams allows passing in a specific combinations of Parameters into the Matrix.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name the specified combination",
Type: []string{"string"},
Format: "",
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params takes only `Parameters` of type `\"string\"` The names of the `params` must match the names of the `params` in the underlying `Task`",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"},
}
}
// schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier returns the generated
// OpenAPI definition for v1beta1.InternalTaskModifier (deprecated type):
// three required atomic lists — stepsToPrepend, stepsToAppend, and volumes.
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"stepsToPrepend": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step"),
},
},
},
},
},
"stepsToAppend": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step"),
},
},
},
},
},
"volumes": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/core/v1.Volume"),
},
},
},
},
},
},
Required: []string{"stepsToPrepend", "stepsToAppend", "volumes"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step", "k8s.io/api/core/v1.Volume"},
}
}
// schema_pkg_apis_pipeline_v1beta1_Matrix returns the generated OpenAPI
// definition for v1beta1.Matrix: fan-out params (array-typed) plus explicit
// include combinations. Generated code — regenerate rather than editing by
// hand.
func schema_pkg_apis_pipeline_v1beta1_Matrix(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Matrix is used to fan out Tasks in a Pipeline",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
},
},
},
},
},
"include": {
SchemaProps: spec.SchemaProps{
Description: "Include is a list of IncludeParams which allows passing in specific combinations of Parameters into the Matrix.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.IncludeParams"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.IncludeParams", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"},
}
}
// schema_pkg_apis_pipeline_v1beta1_Param returns the generated OpenAPI
// definition for v1beta1.Param: a required name/value pair where value is a
// ParamValue ref. Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Param declares an ParamValues to use for the parameter called name.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
},
Required: []string{"name", "value"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"},
}
}
// schema_pkg_apis_pipeline_v1beta1_ParamSpec returns the generated OpenAPI
// definition for v1beta1.ParamSpec: a parameter declaration with required
// name, optional type/description, object-property schema, default value,
// and an enum of allowed string inputs. Generated code — regenerate rather
// than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name declares the name by which a parameter is referenced.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the parameter that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"properties": {
SchemaProps: spec.SchemaProps{
Description: "Properties is the JSON Schema properties to support key-value pairs parameter.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"),
},
},
},
},
},
"default": {
SchemaProps: spec.SchemaProps{
Description: "Default is the value a parameter takes if no input value is supplied. If default is set, a Task may be executed without a supplied value for the parameter.",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
"enum": {
SchemaProps: spec.SchemaProps{
Description: "Enum declares a set of allowed param input values for tasks/pipelines that can be validated. If Enum is not set, no input validation is performed for the param.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"},
}
}
// schema_pkg_apis_pipeline_v1beta1_ParamValue returns the generated OpenAPI
// definition for v1beta1.ParamValue: the internal tagged-union representation
// (Type discriminator plus StringVal/ArrayVal/ObjectVal storage fields, all
// required). Note the Go field names are exposed verbatim here because the
// type uses custom JSON marshalling. Generated code — regenerate rather than
// editing by hand.
func schema_pkg_apis_pipeline_v1beta1_ParamValue(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResultValue is a type alias of ParamValue",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"Type": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"StringVal": {
SchemaProps: spec.SchemaProps{
Description: "Represents the stored type of ParamValues.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"ArrayVal": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
"ObjectVal": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"Type", "StringVal", "ArrayVal", "ObjectVal"},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_Pipeline returns the generated OpenAPI
// definition for v1beta1.Pipeline (deprecated in favor of v1.Pipeline):
// TypeMeta/ObjectMeta plus a PipelineSpec ref; no status property is exposed.
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_Pipeline(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.\n\nDeprecated: Please use v1.Pipeline instead.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the Pipeline from the client",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource returns the
// generated OpenAPI definition for v1beta1.PipelineDeclaredResource
// (deprecated type): required name and type, plus an optional boolean flag.
// Self-contained — no Dependencies list is needed since all fields are
// primitives. Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineDeclaredResource is used by a Pipeline to declare the types of the PipelineResources that it will required to run and names which can be used to refer to these PipelineResources in PipelineTaskResourceBindings.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name that will be used by the Pipeline to refer to this resource. It does not directly correspond to the name of any PipelineResources Task inputs or outputs, and it does not correspond to the actual names of the PipelineResources that will be bound in the PipelineRun.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the type of the PipelineResource.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Optional declares the resource as optional. optional: true - the resource is considered optional optional: false - the resource is considered required (default/equivalent of not specifying it)",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"name", "type"},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineList returns the generated OpenAPI
// definition for v1beta1.PipelineList: standard list envelope with a required
// "items" array of Pipeline refs. Generated code — regenerate rather than
// editing by hand.
func schema_pkg_apis_pipeline_v1beta1_PipelineList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineList contains a list of Pipeline",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Pipeline"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Pipeline", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRef returns the generated OpenAPI
// definition for v1beta1.PipelineRef: name, apiVersion, and the deprecated
// bundle URL — all optional strings, so no Dependencies list is emitted.
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRef can be used to refer to a specific instance of a Pipeline.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "API version of the referent",
Type: []string{"string"},
Format: "",
},
},
"bundle": {
SchemaProps: spec.SchemaProps{
Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead. The field is staying there for go client backward compatibility, but is not used/allowed anymore.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding returns the
// generated OpenAPI definition for v1beta1.PipelineResourceBinding
// (deprecated type): a name plus either a resourceRef or an inline
// resourceSpec. Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineResourceBinding connects a reference to an instance of a PipelineResource with a PipelineResource dependency that the Pipeline has declared\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of the PipelineResource in the Pipeline's declaration",
Type: []string{"string"},
Format: "",
},
},
"resourceRef": {
SchemaProps: spec.SchemaProps{
Description: "ResourceRef is a reference to the instance of the actual PipelineResource that should be used",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef"),
},
},
"resourceSpec": {
SchemaProps: spec.SchemaProps{
Description: "ResourceSpec is specification of a resource that should be created and consumed by the task",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef", "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec"},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef returns the generated
// OpenAPI definition for v1beta1.PipelineResourceRef (deprecated type):
// optional name and apiVersion strings only, so no Dependencies list.
// Generated code — regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineResourceRef can be used to refer to a specific instance of a Resource\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "API version of the referent",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineResult returns the generated
// OpenAPI definition for v1beta1.PipelineResult: required name and value
// (ParamValue ref), with optional type and description. Generated code —
// regenerate rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineResult used to describe the results of a pipeline",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name the given name",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the user-specified type of the result. The possible types are 'string', 'array', and 'object', with 'string' as the default. 'array' and 'object' types are alpha features.",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a human-readable description of the result",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value the expression used to retrieve the value",
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
},
Required: []string{"name", "value"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRun returns the generated OpenAPI
// definition for v1beta1.PipelineRun (deprecated in favor of v1.PipelineRun):
// TypeMeta/ObjectMeta plus spec and status refs. Generated code — regenerate
// rather than editing by hand.
func schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.\n\nDeprecated: Please use v1.PipelineRun instead.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRunList returns the OpenAPI v2 definition
// for the v1beta1 PipelineRunList type: standard TypeMeta/ListMeta fields plus an
// "items" array of PipelineRun references.
//
// NOTE(review): this looks like openapi-gen output — presumably this whole file is
// generated and hand edits will be overwritten on regeneration; confirm the file header.
func schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineRunList contains a list of PipelineRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun"),
									},
								},
							},
						},
					},
				},
			},
		},
		// Dependencies lists every type resolved through ref() above so the
		// aggregator knows which definitions this schema requires.
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRunResult returns the OpenAPI v2 definition
// for the v1beta1 PipelineRunResult type. Both "name" and "value" are required.
func schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineRunResult used to describe the results of a pipeline",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the result's name as declared by the Pipeline",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// "value" is a ParamValue (string, array, or object), so it is
					// expressed as a $ref rather than a primitive type.
					"value": {
						SchemaProps: spec.SchemaProps{
							Description: "Value is the result returned from the execution of this PipelineRun",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
						},
					},
				},
				Required: []string{"name", "value"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRunRunStatus returns the OpenAPI v2
// definition for the v1beta1 PipelineRunRunStatus type, which pairs a PipelineTask
// name with the status of its CustomRun/Run and any guarding when-expressions.
func schema_pkg_apis_pipeline_v1beta1_PipelineRunRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineRunRunStatus contains the name of the PipelineTask for this CustomRun or Run and the CustomRun or Run's Status",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"pipelineTaskName": {
						SchemaProps: spec.SchemaProps{
							Description: "PipelineTaskName is the name of the PipelineTask.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Status is the CustomRunStatus for the corresponding CustomRun or Run",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus"),
						},
					},
					// x-kubernetes-list-type: atomic — the whole list is replaced on
					// update rather than merged element-by-element.
					"whenExpressions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec returns the OpenAPI v2 definition
// for the v1beta1 PipelineRunSpec type: the user-specified desired state of a
// PipelineRun (pipeline reference or inline spec, params, workspaces, timeouts, etc.).
// Descriptions here mirror the Go doc comments on the API struct fields.
func schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineRunSpec defines the desired state of PipelineRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"pipelineRef": {
						SchemaProps: spec.SchemaProps{
							Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef"),
						},
					},
					"pipelineSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifying PipelineSpec can be disabled by setting `disable-inline-spec` feature flag. See Pipeline.spec (API version: tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec"),
						},
					},
					// Deprecated field, kept only so old manifests still validate.
					"resources": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Resources is a list of bindings specifying which actual instances of PipelineResources to use for the resources the Pipeline has declared it needs.\n\nDeprecated: Unused, preserved only for backwards compatibility",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding"),
									},
								},
							},
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Params is a list of parameter names and values.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
									},
								},
							},
						},
					},
					"serviceAccountName": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Used for cancelling a pipelinerun (and maybe more later on)",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"timeouts": {
						SchemaProps: spec.SchemaProps{
							Description: "Time after which the Pipeline times out. Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields"),
						},
					},
					// Deprecated single timeout; superseded by the "timeouts" struct above.
					"timeout": {
						SchemaProps: spec.SchemaProps{
							Description: "Timeout is the Time after which the Pipeline times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration\n\nDeprecated: use pipelineRunSpec.Timeouts.Pipeline instead",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					"podTemplate": {
						SchemaProps: spec.SchemaProps{
							Description: "PodTemplate holds pod specific configuration",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"),
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Workspaces holds a set of workspace bindings that must match names with those declared in the pipeline.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding"),
									},
								},
							},
						},
					},
					"taskRunSpecs": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "TaskRunSpecs holds a set of runtime specs",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec"),
									},
								},
							},
						},
					},
					"managedBy": {
						SchemaProps: spec.SchemaProps{
							Description: "ManagedBy indicates which controller is responsible for reconciling this resource. If unset or set to \"tekton.dev/pipeline\", the default Tekton controller will manage this resource. This field is immutable.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus returns the OpenAPI v2
// definition for the v1beta1 PipelineRunStatus type: knative duck-status fields
// (observedGeneration, conditions, annotations) plus the PipelineRun-specific
// status fields (times, child references, results, provenance, tracing, etc.).
// The PipelineRun-specific properties duplicate PipelineRunStatusFields below
// because that struct is inlined into this one in the Go API.
func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineRunStatus defines the observed state of PipelineRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"observedGeneration": {
						SchemaProps: spec.SchemaProps{
							Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					// Conditions use the standard k8s merge-patch strategy keyed on "type".
					"conditions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "type",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Conditions the latest available observations of a resource's current state.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("knative.dev/pkg/apis.Condition"),
									},
								},
							},
						},
					},
					"annotations": {
						SchemaProps: spec.SchemaProps{
							Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"startTime": {
						SchemaProps: spec.SchemaProps{
							Description: "StartTime is the time the PipelineRun is actually started.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"completionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "CompletionTime is the time the PipelineRun completed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					// "taskRuns" and "runs" are deprecated maps retained only so older
					// clients/servers can still deserialize; childReferences replaces them.
					"taskRuns": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key.\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus"),
									},
								},
							},
						},
					},
					"runs": {
						SchemaProps: spec.SchemaProps{
							Description: "Runs is a map of PipelineRunRunStatus with the run name as the key\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus"),
									},
								},
							},
						},
					},
					"pipelineResults": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "PipelineResults are the list of results written out by the pipeline task's containers",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult"),
									},
								},
							},
						},
					},
					"pipelineSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "PipelineSpec contains the exact spec used to instantiate the run. See Pipeline.spec (API version: tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec"),
						},
					},
					"skippedTasks": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "list of tasks that were skipped due to when expressions evaluating to false",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask"),
									},
								},
							},
						},
					},
					"childReferences": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference"),
									},
								},
							},
						},
					},
					"finallyStartTime": {
						SchemaProps: spec.SchemaProps{
							Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance"),
						},
					},
					"spanContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SpanContext contains tracing span context fields",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields returns the OpenAPI v2
// definition for the v1beta1 PipelineRunStatusFields type — the PipelineRun-specific
// half of PipelineRunStatus, defined separately so it can be consumed via duck typing.
// Its properties are intentionally a subset of PipelineRunStatus (no duck-status fields).
func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineRunStatusFields holds the fields of PipelineRunStatus' status. This is defined separately and inlined so that other types can readily consume these fields via duck typing.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"startTime": {
						SchemaProps: spec.SchemaProps{
							Description: "StartTime is the time the PipelineRun is actually started.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"completionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "CompletionTime is the time the PipelineRun completed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					// Deprecated legacy maps; superseded by childReferences.
					"taskRuns": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key.\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus"),
									},
								},
							},
						},
					},
					"runs": {
						SchemaProps: spec.SchemaProps{
							Description: "Runs is a map of PipelineRunRunStatus with the run name as the key\n\nDeprecated: use ChildReferences instead. As of v0.45.0, this field is no longer populated and is only included for backwards compatibility with older server versions.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus"),
									},
								},
							},
						},
					},
					"pipelineResults": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "PipelineResults are the list of results written out by the pipeline task's containers",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult"),
									},
								},
							},
						},
					},
					"pipelineSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "PipelineSpec contains the exact spec used to instantiate the run. See Pipeline.spec (API version: tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec"),
						},
					},
					"skippedTasks": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "list of tasks that were skipped due to when expressions evaluating to false",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask"),
									},
								},
							},
						},
					},
					"childReferences": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference"),
									},
								},
							},
						},
					},
					"finallyStartTime": {
						SchemaProps: spec.SchemaProps{
							Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance"),
						},
					},
					"spanContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SpanContext contains tracing span context fields",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus returns the OpenAPI v2
// definition for the v1beta1 PipelineRunTaskRunStatus type, which pairs a
// PipelineTask name with its TaskRun status and any guarding when-expressions.
// Structurally parallel to PipelineRunRunStatus, but for TaskRuns.
func schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"pipelineTaskName": {
						SchemaProps: spec.SchemaProps{
							Description: "PipelineTaskName is the name of the PipelineTask.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Status is the TaskRunStatus for the corresponding TaskRun",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus"),
						},
					},
					"whenExpressions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineSpec returns the OpenAPI v2 definition
// for the v1beta1 PipelineSpec type: the authored definition of a Pipeline —
// its task graph ("tasks" and "finally"), declared params, workspaces, and results.
func schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineSpec defines the desired state of Pipeline.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"displayName": {
						SchemaProps: spec.SchemaProps{
							Description: "DisplayName is a user-facing name of the pipeline that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is a user-facing description of the pipeline that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// Deprecated PipelineResources list; kept for backwards compatibility.
					"resources": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: Unused, preserved only for backwards compatibility",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineDeclaredResource"),
									},
								},
							},
						},
					},
					"tasks": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Tasks declares the graph of Tasks that execute when this Pipeline is run.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask"),
									},
								},
							},
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Params declares a list of input parameters that must be supplied when this Pipeline is run.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec"),
									},
								},
							},
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Workspaces declares a set of named workspaces that are expected to be provided by a PipelineRun.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration"),
									},
								},
							},
						},
					},
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results are values that this pipeline can output once run",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult"),
									},
								},
							},
						},
					},
					"finally": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Finally declares the list of Tasks that execute just before leaving the Pipeline i.e. either after all Tasks are finished executing successfully or after a failure which would result in ending the Pipeline",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineDeclaredResource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTask returns the OpenAPI v2 definition
// for the v1beta1 PipelineTask type: a single node in a Pipeline's task graph,
// referencing a Task (or embedding one inline) plus its params, workspaces,
// ordering constraints, retries, and error-handling policy.
func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineTask defines a task in a Pipeline, passing inputs from both Params and from the output of previous tasks.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of this task within the context of a Pipeline. Name is used as a coordinate with the `from` and `runAfter` fields to establish the execution order of tasks relative to one another.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"displayName": {
						SchemaProps: spec.SchemaProps{
							Description: "DisplayName is the display name of this task within the context of a Pipeline. This display name may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is the description of this task within the context of a Pipeline. This description may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// taskRef and taskSpec are mutually exclusive ways of supplying the
					// Task; validation of that exclusivity lives in the webhook, not here.
					"taskRef": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskRef is a reference to a task definition.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef"),
						},
					},
					"taskSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskSpec is a specification of a task Specifying TaskSpec can be disabled by setting `disable-inline-spec` feature flag. See Task.spec (API version: tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask"),
						},
					},
					"when": {
						SchemaProps: spec.SchemaProps{
							Description: "WhenExpressions is a list of when expressions that need to be true for the task to run",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"),
									},
								},
							},
						},
					},
					"retries": {
						SchemaProps: spec.SchemaProps{
							Description: "Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"runAfter": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "RunAfter is the list of PipelineTask names that should be executed before this Task executes. (Used to force a specific ordering in graph execution.)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"resources": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: Unused, preserved only for backwards compatibility",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources"),
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Parameters declares parameters passed to this task.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
									},
								},
							},
						},
					},
					"matrix": {
						SchemaProps: spec.SchemaProps{
							Description: "Matrix declares parameters used to fan out this task.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Matrix"),
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Workspaces maps workspaces from the pipeline spec to the workspaces declared in the Task.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding"),
									},
								},
							},
						},
					},
					"timeout": {
						SchemaProps: spec.SchemaProps{
							Description: "Duration after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					// Preview-mode fields: pipelines-in-pipelines support.
					"pipelineRef": {
						SchemaProps: spec.SchemaProps{
							Description: "PipelineRef is a reference to a pipeline definition Note: PipelineRef is in preview mode and not yet supported",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef"),
						},
					},
					"pipelineSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "PipelineSpec is a specification of a pipeline Note: PipelineSpec is in preview mode and not yet supported Specifying PipelineSpec can be disabled by setting `disable-inline-spec` feature flag. See Pipeline.spec (API version: tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec"),
						},
					},
					"onError": {
						SchemaProps: spec.SchemaProps{
							Description: "OnError defines the exiting behavior of a PipelineRun on error can be set to [ continue | stopAndFail ]",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Matrix", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource returns the
// OpenAPI definition for the v1beta1 PipelineTaskInputResource type: an
// object with required string properties "name" and "resource" and an
// optional atomic list of strings "from". The ref callback is unused here
// because this schema references no other definitions.
//
// NOTE(review): this file looks machine-generated (openapi-gen style);
// prefer regenerating over hand-editing it.
func schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskInputResource maps the name of a declared PipelineResource input dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used. This input may come from a previous task.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of the PipelineResource as declared by the Task.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Description: "Resource is the name of the DeclaredPipelineResource to use.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"from": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "From is the list of PipelineTask names that the resource has to come from. (Implies an ordering in the execution graph.)",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
},
Required: []string{"name", "resource"},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTaskMetadata returns the OpenAPI
// definition for the v1beta1 PipelineTaskMetadata type: an object with two
// optional string-to-string map properties, "labels" and "annotations".
// The ref callback is unused here because this schema references no other
// definitions.
func schema_pkg_apis_pipeline_v1beta1_PipelineTaskMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"labels": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource returns the
// OpenAPI definition for the v1beta1 PipelineTaskOutputResource type: an
// object with two required string properties, "name" and "resource". The
// ref callback is unused here because this schema references no other
// definitions.
func schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskOutputResource maps the name of a declared PipelineResource output dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of the PipelineResource as declared by the Task.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Description: "Resource is the name of the DeclaredPipelineResource to use.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
},
Required: []string{"name", "resource"},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTaskParam returns the OpenAPI
// definition for the v1beta1 PipelineTaskParam type: an object with two
// required string properties, "name" and "value". The ref callback is
// unused here because this schema references no other definitions.
func schema_pkg_apis_pipeline_v1beta1_PipelineTaskParam(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// Both properties share the same plain-string schema shape.
	stringSchema := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Default: "",
			Type:    []string{"string"},
			Format:  "",
		},
	}
	schemaProps := spec.SchemaProps{
		Description: "PipelineTaskParam is used to provide arbitrary string parameters to a Task.",
		Type:        []string{"object"},
		Properties: map[string]spec.Schema{
			"name":  stringSchema,
			"value": stringSchema,
		},
		Required: []string{"name", "value"},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{SchemaProps: schemaProps},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources returns the OpenAPI
// definition for the v1beta1 PipelineTaskResources type: an object with two
// optional atomic array properties, "inputs" and "outputs", whose items are
// $refs to PipelineTaskInputResource and PipelineTaskOutputResource; those
// referenced definitions are therefore listed in Dependencies.
func schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"inputs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Inputs holds the mapping from the PipelineResources declared in DeclaredPipelineResources to the input PipelineResources required by the Task.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource"),
},
},
},
},
},
"outputs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Outputs holds the mapping from the PipelineResources declared in DeclaredPipelineResources to the input PipelineResources required by the Task.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskOutputResource"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskOutputResource"},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTaskRun returns the OpenAPI
// definition for the v1beta1 PipelineTaskRun type: an object with a single
// optional string property "name". The ref callback is unused here because
// this schema references no other definitions.
func schema_pkg_apis_pipeline_v1beta1_PipelineTaskRun(ref common.ReferenceCallback) common.OpenAPIDefinition {
	properties := map[string]spec.Schema{
		"name": {
			SchemaProps: spec.SchemaProps{
				Type:   []string{"string"},
				Format: "",
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PipelineTaskRun reports the results of running a step in the Task. Each task has the potential to succeed or fail (based on the exit code) and produces logs.",
				Type:        []string{"object"},
				Properties:  properties,
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec returns the OpenAPI
// definition for the v1beta1 PipelineTaskRunSpec type. All properties are
// optional: two plain strings ("pipelineTaskName", "taskServiceAccountName"),
// several $refs (pod template, metadata, compute resources, timeout) and two
// atomic arrays of step/sidecar overrides. Every definition reached through
// ref(...) is listed in Dependencies.
func schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PipelineTaskRunSpec can be used to configure specific specs for a concrete Task",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"pipelineTaskName": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
"taskServiceAccountName": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
"taskPodTemplate": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"),
},
},
"stepOverrides": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride"),
},
},
},
},
},
"sidecarOverrides": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride"),
},
},
},
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata"),
},
},
"computeResources": {
SchemaProps: spec.SchemaProps{
Description: "Compute resources to use for this TaskRun",
Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"timeout": {
SchemaProps: spec.SchemaProps{
Description: "Duration after which the TaskRun times out. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
}
}
// schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration returns the
// OpenAPI definition for the v1beta1 PipelineWorkspaceDeclaration type: an
// object with a required string "name", an optional string "description"
// and an optional boolean "optional". The ref callback is unused here
// because this schema references no other definitions.
//
// NOTE(review): the Description string below talks about the alias name
// WorkspacePipelineDeclaration — presumably copied from that type's doc
// comment by the generator; confirm against the source type before editing.
func schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding.\n\nDeprecated: use PipelineWorkspaceDeclaration type instead",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of a workspace to be provided by a PipelineRun.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.",
Type:        []string{"string"},
Format:      "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Optional marks a Workspace as not being required in PipelineRuns. By default this field is false and so declared workspaces are required.",
Type:        []string{"boolean"},
Format:      "",
},
},
},
Required: []string{"name"},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_PropertySpec returns the OpenAPI
// definition for the v1beta1 PropertySpec type: an object with a single
// optional string property "type". The ref callback is unused here because
// this schema references no other definitions.
func schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	typeProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Type:   []string{"string"},
			Format: "",
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PropertySpec defines the struct for object keys",
				Type:        []string{"object"},
				Properties:  map[string]spec.Schema{"type": typeProp},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_Provenance returns the OpenAPI
// definition for the v1beta1 Provenance type: an object with three optional
// properties ("configSource", "refSource", "featureFlags"), each a $ref to
// another definition; the referenced definitions are listed in Dependencies.
//
// NOTE(review): fixed the "amoumt" -> "amount" typo in the user-facing
// Description string. This file looks machine-generated, so the same typo
// should also be fixed in the doc comment of the Provenance Go type,
// otherwise regeneration will reintroduce it.
func schema_pkg_apis_pipeline_v1beta1_Provenance(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Provenance contains metadata about resources used in the TaskRun/PipelineRun such as the source from where a remote build definition was fetched. This field aims to carry minimum amount of metadata in *Run status so that Tekton Chains can capture them in the provenance.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"configSource": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: Use RefSource instead",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"),
						},
					},
					"refSource": {
						SchemaProps: spec.SchemaProps{
							Description: "RefSource identifies the source where a remote task/pipeline came from.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"),
						},
					},
					"featureFlags": {
						SchemaProps: spec.SchemaProps{
							Description: "FeatureFlags identifies the feature flags that were used during the task/pipeline run",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/config.FeatureFlags", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_Ref returns the OpenAPI definition for
// the v1beta1 Ref type: an object with a single optional string property
// "name". The ref callback is unused here because this schema references
// no other definitions.
func schema_pkg_apis_pipeline_v1beta1_Ref(ref common.ReferenceCallback) common.OpenAPIDefinition {
	nameProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "Name of the referenced step",
			Type:        []string{"string"},
			Format:      "",
		},
	}
	schemaProps := spec.SchemaProps{
		Description: "Ref can be used to refer to a specific instance of a StepAction.",
		Type:        []string{"object"},
		Properties:  map[string]spec.Schema{"name": nameProp},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{SchemaProps: schemaProps},
	}
}
// schema_pkg_apis_pipeline_v1beta1_RefSource returns the OpenAPI definition
// for the v1beta1 RefSource type: an object with optional string properties
// "uri" and "entryPoint" and an optional string-to-string map "digest". The
// ref callback is unused here because this schema references no other
// definitions.
func schema_pkg_apis_pipeline_v1beta1_RefSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RefSource contains the information that can uniquely identify where a remote built definition came from i.e. Git repositories, Tekton Bundles in OCI registry and hub.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"uri": {
SchemaProps: spec.SchemaProps{
Description: "URI indicates the identity of the source of the build definition. Example: \"https://github.com/tektoncd/catalog\"",
Type:        []string{"string"},
Format:      "",
},
},
"digest": {
SchemaProps: spec.SchemaProps{
Description: "Digest is a collection of cryptographic digests for the contents of the artifact specified by URI. Example: {\"sha1\": \"f99d13e554ffcb696dee719fa85b695cb5b0f428\"}",
Type:        []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"entryPoint": {
SchemaProps: spec.SchemaProps{
Description: "EntryPoint identifies the entry point into the build. This is often a path to a build definition file and/or a target label within that file. Example: \"task/git-clone/0.10/git-clone.yaml\"",
Type:        []string{"string"},
Format:      "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_ResolverRef returns the OpenAPI
// definition for the v1beta1 ResolverRef type: an object with an optional
// string "resolver" and an optional array "params" whose items are $refs to
// the v1beta1 Param definition, which is therefore listed in Dependencies.
func schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"resolver": {
SchemaProps: spec.SchemaProps{
Description: "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".",
Type:        []string{"string"},
Format:      "",
},
},
"params": {
SchemaProps: spec.SchemaProps{
Description: "Params contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"},
}
}
// schema_pkg_apis_pipeline_v1beta1_ResultRef returns the OpenAPI definition
// for the v1beta1 ResultRef type: an object with string properties
// "pipelineTask", "result" and "property", plus an int32 "resultsIndex".
// All four are listed as Required. The ref callback is unused here because
// this schema references no other definitions.
func schema_pkg_apis_pipeline_v1beta1_ResultRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResultRef is a type that represents a reference to a task run result",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"pipelineTask": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
"result": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
"resultsIndex": {
SchemaProps: spec.SchemaProps{
Type:   []string{"integer"},
Format: "int32",
},
},
"property": {
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
Required: []string{"pipelineTask", "result", "resultsIndex", "property"},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_Sidecar returns the OpenAPI definition
// for the v1beta1 Sidecar type. The schema mirrors a Kubernetes container
// spec (image, command, args, env, ports, probes, volume mounts, security
// context, ...) plus Tekton-specific "script", "workspaces" and
// "restartPolicy" properties; only "name" is Required. Every definition
// reached through ref(...) is listed in Dependencies.
func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the Sidecar specified as a DNS_LABEL. Each Sidecar in a Task must have a unique name (DNS_LABEL). Cannot be updated.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "Image name to be used by the Sidecar. More info: https://kubernetes.io/docs/concepts/containers/images",
Type:        []string{"string"},
Format:      "",
},
},
"command": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"args": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"workingDir": {
SchemaProps: spec.SchemaProps{
Description: "Sidecar's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
Type:        []string{"string"},
Format:      "",
},
},
"ports": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-map-keys": []interface{}{
"containerPort",
"protocol",
},
"x-kubernetes-list-type":       "map",
"x-kubernetes-patch-merge-key": "containerPort",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of ports to expose from the Sidecar. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.ContainerPort"),
},
},
},
},
},
"envFrom": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of sources to populate environment variables in the Sidecar. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the Sidecar is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.EnvFromSource"),
},
},
},
},
},
"env": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type":       "atomic",
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of environment variables to set in the Sidecar. Cannot be updated.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.EnvVar"),
},
},
},
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Compute Resources required by this Sidecar. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
Default:     map[string]interface{}{},
Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"volumeMounts": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type":       "atomic",
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Volumes to mount into the Sidecar's filesystem. Cannot be updated.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.VolumeMount"),
},
},
},
},
},
"volumeDevices": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type":       "atomic",
"x-kubernetes-patch-merge-key": "devicePath",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "volumeDevices is the list of block devices to be used by the Sidecar.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.VolumeDevice"),
},
},
},
},
},
"livenessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Periodic probe of Sidecar liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Ref:         ref("k8s.io/api/core/v1.Probe"),
},
},
"readinessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Periodic probe of Sidecar service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Ref:         ref("k8s.io/api/core/v1.Probe"),
},
},
"startupProbe": {
SchemaProps: spec.SchemaProps{
Description: "StartupProbe indicates that the Pod the Sidecar is running in has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Ref:         ref("k8s.io/api/core/v1.Probe"),
},
},
"lifecycle": {
SchemaProps: spec.SchemaProps{
Description: "Actions that the management system should take in response to Sidecar lifecycle events. Cannot be updated.",
Ref:         ref("k8s.io/api/core/v1.Lifecycle"),
},
},
"terminationMessagePath": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Path at which the file to which the Sidecar's termination message will be written is mounted into the Sidecar's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
Type:        []string{"string"},
Format:      "",
},
},
"terminationMessagePolicy": {
SchemaProps: spec.SchemaProps{
Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the Sidecar status message on both success and failure. FallbackToLogsOnError will use the last chunk of Sidecar log output if the termination message file is empty and the Sidecar exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
Type:        []string{"string"},
Format:      "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
Type:        []string{"string"},
Format:      "",
},
},
"securityContext": {
SchemaProps: spec.SchemaProps{
Description: "SecurityContext defines the security options the Sidecar should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
Ref:         ref("k8s.io/api/core/v1.SecurityContext"),
},
},
"stdin": {
SchemaProps: spec.SchemaProps{
Description: "Whether this Sidecar should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Sidecar will always result in EOF. Default is false.",
Type:        []string{"boolean"},
Format:      "",
},
},
"stdinOnce": {
SchemaProps: spec.SchemaProps{
Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on Sidecar start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the Sidecar is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
Type:        []string{"boolean"},
Format:      "",
},
},
"tty": {
SchemaProps: spec.SchemaProps{
Description: "Whether this Sidecar should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
Type:        []string{"boolean"},
Format:      "",
},
},
"script": {
SchemaProps: spec.SchemaProps{
Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.",
Type:        []string{"string"},
Format:      "",
},
},
"workspaces": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage"),
},
},
},
},
},
"restartPolicy": {
SchemaProps: spec.SchemaProps{
Description: "RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an initContainer and must have it's policy set to \"Always\". It is currently left optional to help support Kubernetes versions prior to 1.29 when this feature was introduced.",
Type:        []string{"string"},
Format:      "",
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
}
}
// schema_pkg_apis_pipeline_v1beta1_SidecarState returns the OpenAPI
// definition for the v1beta1 SidecarState type: an object with three $ref
// properties for the container state ("waiting", "running", "terminated")
// and three plain string properties ("name", "container", "imageID"). The
// referenced core/v1 container-state definitions are listed in Dependencies.
func schema_pkg_apis_pipeline_v1beta1_SidecarState(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SidecarState reports the results of running a sidecar in a Task.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"waiting": {
SchemaProps: spec.SchemaProps{
Description: "Details about a waiting container",
Ref:         ref("k8s.io/api/core/v1.ContainerStateWaiting"),
},
},
"running": {
SchemaProps: spec.SchemaProps{
Description: "Details about a running container",
Ref:         ref("k8s.io/api/core/v1.ContainerStateRunning"),
},
},
"terminated": {
SchemaProps: spec.SchemaProps{
Description: "Details about a terminated container",
Ref:         ref("k8s.io/api/core/v1.ContainerStateTerminated"),
},
},
"name": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
"container": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
"imageID": {
SchemaProps: spec.SchemaProps{
Type:   []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
}
}
// schema_pkg_apis_pipeline_v1beta1_SkippedTask returns the OpenAPI
// definition for the v1beta1 SkippedTask type: an object with required
// string properties "name" and "reason" and an optional atomic array
// "whenExpressions" whose items are $refs to the v1beta1 WhenExpression
// definition, which is therefore listed in Dependencies.
func schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SkippedTask is used to describe the Tasks that were skipped due to their When Expressions evaluating to False. This is a struct because we are looking into including more details about the When Expressions that caused this Task to be skipped.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the Pipeline Task name",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "Reason is the cause of the PipelineTask being skipped.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"whenExpressions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"),
},
},
},
},
},
},
Required: []string{"name", "reason"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"},
}
}
// schema_pkg_apis_pipeline_v1beta1_Step builds the OpenAPI definition for
// v1beta1.Step. Each Properties entry mirrors one field of the Step Go
// struct; every cross-type reference is resolved through the ref callback
// and repeated verbatim in Dependencies so consumers know which other
// definitions to pull in. Many container-style fields carry a
// "Deprecated" note in their description — that text comes from the API
// type's own field comments.
func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Step runs a subcomponent of a Task",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					// "name" is the only required property (see Required below).
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the Step specified as a DNS_LABEL. Each Step in a Task must have a unique name.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"displayName": {
						SchemaProps: spec.SchemaProps{
							Description: "DisplayName is a user-facing name of the step that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"image": {
						SchemaProps: spec.SchemaProps{
							Description: "Image reference name to run for this Step. More info: https://kubernetes.io/docs/concepts/containers/images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// command/args are atomic string lists (whole-list replacement on patch).
					"command": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"args": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"workingDir": {
						SchemaProps: spec.SchemaProps{
							Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// "ports" is a map-typed list keyed by containerPort+protocol
					// (server-side merge semantics); deprecated per its description.
					"ports": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-map-keys": []interface{}{
									"containerPort",
									"protocol",
								},
								"x-kubernetes-list-type":       "map",
								"x-kubernetes-patch-merge-key": "containerPort",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.ContainerPort"),
									},
								},
							},
						},
					},
					"envFrom": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvFromSource"),
									},
								},
							},
						},
					},
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables to set in the container. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"resources": {
						SchemaProps: spec.SchemaProps{
							Description: "Compute Resources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
						},
					},
					"volumeMounts": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "mountPath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Volumes to mount into the Step's filesystem. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeMount"),
									},
								},
							},
						},
					},
					"volumeDevices": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "devicePath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "volumeDevices is the list of block devices to be used by the Step.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeDevice"),
									},
								},
							},
						},
					},
					// Probe/lifecycle fields are carried over from core/v1 Container
					// and are all marked deprecated in their descriptions.
					"livenessProbe": {
						SchemaProps: spec.SchemaProps{
							Description: "Periodic probe of container liveness. Step will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.",
							Ref:         ref("k8s.io/api/core/v1.Probe"),
						},
					},
					"readinessProbe": {
						SchemaProps: spec.SchemaProps{
							Description: "Periodic probe of container service readiness. Step will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.",
							Ref:         ref("k8s.io/api/core/v1.Probe"),
						},
					},
					"startupProbe": {
						SchemaProps: spec.SchemaProps{
							Description: "DeprecatedStartupProbe indicates that the Pod this Step runs in has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.",
							Ref:         ref("k8s.io/api/core/v1.Probe"),
						},
					},
					"lifecycle": {
						SchemaProps: spec.SchemaProps{
							Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.",
							Ref:         ref("k8s.io/api/core/v1.Lifecycle"),
						},
					},
					"terminationMessagePath": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: This field will be removed in a future release and can't be meaningfully used.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"terminationMessagePolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: This field will be removed in a future release and can't be meaningfully used.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"imagePullPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
							Ref:         ref("k8s.io/api/core/v1.SecurityContext"),
						},
					},
					"stdin": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.\n\nDeprecated: This field will be removed in a future release.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"stdinOnce": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false\n\nDeprecated: This field will be removed in a future release.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"tty": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.\n\nDeprecated: This field will be removed in a future release.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					// Tekton-specific extensions below (script, timeout, workspaces,
					// onError, stdout/stderr config, StepAction ref/params/results, when).
					"script": {
						SchemaProps: spec.SchemaProps{
							Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"timeout": {
						SchemaProps: spec.SchemaProps{
							Description: "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage"),
									},
								},
							},
						},
					},
					"onError": {
						SchemaProps: spec.SchemaProps{
							Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ]",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"stdoutConfig": {
						SchemaProps: spec.SchemaProps{
							Description: "Stores configuration for the stdout stream of the step.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig"),
						},
					},
					"stderrConfig": {
						SchemaProps: spec.SchemaProps{
							Description: "Stores configuration for the stderr stream of the step.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig"),
						},
					},
					"ref": {
						SchemaProps: spec.SchemaProps{
							Description: "Contains the reference to an existing StepAction.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Ref"),
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Params declares parameters passed to this step action.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
									},
								},
							},
						},
					},
					// NOTE: results items reference the v1 (not v1beta1) StepResult type.
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results declares StepResults produced by the Step.\n\nIt can be used in an inlined Step when used to store Results to $(step.results.resultName.path). It cannot be used when referencing StepActions using [v1beta1.Step.Ref]. The Results declared by the StepActions will be stored here instead.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult"),
									},
								},
							},
						},
					},
					"when": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"),
									},
								},
							},
						},
					},
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Ref", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_StepAction builds the OpenAPI definition
// for v1beta1.StepAction, the actionable components of a Step that can be
// referenced from the cluster or via remote resolution.
func schema_pkg_apis_pipeline_v1beta1_StepAction(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// stringProp builds the schema shared by the plain TypeMeta string fields.
	stringProp := func(desc string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Type:        []string{"string"},
				Format:      "",
			},
		}
	}
	props := map[string]spec.Schema{
		"kind":       stringProp("Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"),
		"apiVersion": stringProp("APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"),
		// Standard object metadata.
		"metadata": {
			SchemaProps: spec.SchemaProps{
				Default: map[string]interface{}{},
				Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
			},
		},
		"spec": {
			SchemaProps: spec.SchemaProps{
				Description: "Spec holds the desired state of the Step from the client",
				Default:     map[string]interface{}{},
				Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionSpec"),
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepAction represents the actionable components of Step. The Step can only reference it from the cluster or using remote resolution.",
				Type:        []string{"object"},
				Properties:  props,
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_StepActionList builds the OpenAPI
// definition for v1beta1.StepActionList, the standard list wrapper around
// StepAction objects.
func schema_pkg_apis_pipeline_v1beta1_StepActionList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// Required array of StepAction references.
	itemsProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Type: []string{"array"},
			Items: &spec.SchemaOrArray{
				Schema: &spec.Schema{
					SchemaProps: spec.SchemaProps{
						Default: map[string]interface{}{},
						Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepAction"),
					},
				},
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepActionList contains a list of StepActions",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// Standard list metadata.
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": itemsProp,
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepAction", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_StepActionSpec builds the OpenAPI
// definition for v1beta1.StepActionSpec, the reusable, actionable body of a
// step. Each Properties entry mirrors one field of the StepActionSpec Go
// struct; cross-type references go through the ref callback and are repeated
// in Dependencies. Note that params and results reference the v1 (not
// v1beta1) ParamSpec/StepResult types.
func schema_pkg_apis_pipeline_v1beta1_StepActionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepActionSpec contains the actionable components of a step.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is a user-facing description of the stepaction that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"image": {
						SchemaProps: spec.SchemaProps{
							Description: "Image reference name to run for this StepAction. More info: https://kubernetes.io/docs/concepts/containers/images",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// "command" carries the atomic list-type extension; "args" below
					// does not (it has no VendorExtensible block in this schema).
					"command": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"args": {
						SchemaProps: spec.SchemaProps{
							Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"env": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "name",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of environment variables to set in the container. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.EnvVar"),
									},
								},
							},
						},
					},
					"script": {
						SchemaProps: spec.SchemaProps{
							Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"workingDir": {
						SchemaProps: spec.SchemaProps{
							Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Params is a list of input parameters required to run the stepAction. Params must be supplied as inputs in Steps unless they declare a defaultvalue.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"),
									},
								},
							},
						},
					},
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results are values that this StepAction can output",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult"),
									},
								},
							},
						},
					},
					"securityContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ The value set in StepAction will take precedence over the value from Task.",
							Ref:         ref("k8s.io/api/core/v1.SecurityContext"),
						},
					},
					"volumeMounts": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type":       "atomic",
								"x-kubernetes-patch-merge-key": "mountPath",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Volumes to mount into the Step's filesystem. Cannot be updated.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.VolumeMount"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_StepOutputConfig builds the OpenAPI
// definition for v1beta1.StepOutputConfig. The schema has no cross-type
// references, so the ref callback is unused and Dependencies stays empty.
func schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// Single optional string property: the local filesystem path the stream
	// is duplicated to.
	pathProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "Path to duplicate stdout stream to on container's local filesystem.",
			Type:        []string{"string"},
			Format:      "",
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepOutputConfig stores configuration for a step output stream.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"path": pathProp,
				},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_StepState builds the OpenAPI definition
// for v1beta1.StepState, which reports the observed results of running a
// step in a Task. The waiting/running/terminated properties mirror the
// core/v1 ContainerState variants; the remaining fields are Tekton-specific
// status data (results, provenance, artifact inputs/outputs).
func schema_pkg_apis_pipeline_v1beta1_StepState(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StepState reports the results of running a step in a Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"waiting": {
						SchemaProps: spec.SchemaProps{
							Description: "Details about a waiting container",
							Ref:         ref("k8s.io/api/core/v1.ContainerStateWaiting"),
						},
					},
					"running": {
						SchemaProps: spec.SchemaProps{
							Description: "Details about a running container",
							Ref:         ref("k8s.io/api/core/v1.ContainerStateRunning"),
						},
					},
					"terminated": {
						SchemaProps: spec.SchemaProps{
							Description: "Details about a terminated container",
							Ref:         ref("k8s.io/api/core/v1.ContainerStateTerminated"),
						},
					},
					// Plain string identifiers; no descriptions in the API type.
					"name": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"container": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"imageID": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"results": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult"),
									},
								},
							},
						},
					},
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance"),
						},
					},
					// Artifact lists consumed (inputs) and produced (outputs) by the step.
					"inputs": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact"),
									},
								},
							},
						},
					},
					"outputs": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_StepTemplate returns the OpenAPI schema for
// v1beta1.StepTemplate: defaults (image, command, env, volumeMounts, ...) applied
// to every Step of a Task. Many container passthrough fields (ports, probes,
// lifecycle, stdin/tty, ...) carry "Deprecated" in their descriptions.
// NOTE(review): this looks like openapi-gen generated code — regenerate from the
// Go type definitions rather than hand-editing; TODO confirm generator source.
func schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StepTemplate is a template for a Step",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Default name for each Step specified as a DNS_LABEL. Each Step in a Task must have a unique name. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "Default image name to use for each Step. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
Type:        []string{"string"},
Format:      "",
},
},
"command": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"args": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
"workingDir": {
SchemaProps: spec.SchemaProps{
Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
Type:        []string{"string"},
Format:      "",
},
},
"ports": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-map-keys": []interface{}{
"containerPort",
"protocol",
},
"x-kubernetes-list-type":       "map",
"x-kubernetes-patch-merge-key": "containerPort",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of ports to expose from the Step's container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.ContainerPort"),
},
},
},
},
},
"envFrom": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of sources to populate environment variables in the Step. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.EnvFromSource"),
},
},
},
},
},
"env": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type":       "atomic",
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of environment variables to set in the container. Cannot be updated.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.EnvVar"),
},
},
},
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Compute Resources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
Default:     map[string]interface{}{},
Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"volumeMounts": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type":       "atomic",
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Volumes to mount into the Step's filesystem. Cannot be updated.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.VolumeMount"),
},
},
},
},
},
"volumeDevices": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type":       "atomic",
"x-kubernetes-patch-merge-key": "devicePath",
"x-kubernetes-patch-strategy":  "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "volumeDevices is the list of block devices to be used by the Step.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/api/core/v1.VolumeDevice"),
},
},
},
},
},
"livenessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.",
Ref:         ref("k8s.io/api/core/v1.Probe"),
},
},
"readinessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.",
Ref:         ref("k8s.io/api/core/v1.Probe"),
},
},
"startupProbe": {
SchemaProps: spec.SchemaProps{
Description: "DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n\nDeprecated: This field will be removed in a future release.",
Ref:         ref("k8s.io/api/core/v1.Probe"),
},
},
"lifecycle": {
SchemaProps: spec.SchemaProps{
Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.\n\nDeprecated: This field will be removed in a future release.",
Ref:         ref("k8s.io/api/core/v1.Lifecycle"),
},
},
"terminationMessagePath": {
SchemaProps: spec.SchemaProps{
Description: "Deprecated: This field will be removed in a future release and cannot be meaningfully used.",
Type:        []string{"string"},
Format:      "",
},
},
"terminationMessagePolicy": {
SchemaProps: spec.SchemaProps{
Description: "Deprecated: This field will be removed in a future release and cannot be meaningfully used.",
Type:        []string{"string"},
Format:      "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
Type:        []string{"string"},
Format:      "",
},
},
"securityContext": {
SchemaProps: spec.SchemaProps{
Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
Ref:         ref("k8s.io/api/core/v1.SecurityContext"),
},
},
"stdin": {
SchemaProps: spec.SchemaProps{
Description: "Whether this Step should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Step will always result in EOF. Default is false.\n\nDeprecated: This field will be removed in a future release.",
Type:        []string{"boolean"},
Format:      "",
},
},
"stdinOnce": {
SchemaProps: spec.SchemaProps{
Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false\n\nDeprecated: This field will be removed in a future release.",
Type:        []string{"boolean"},
Format:      "",
},
},
"tty": {
SchemaProps: spec.SchemaProps{
Description: "Whether this Step should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.\n\nDeprecated: This field will be removed in a future release.",
Type:        []string{"boolean"},
Format:      "",
},
},
},
// "name" is the only required StepTemplate property.
Required: []string{"name"},
},
},
// Transitive schema refs resolved by the aggregator (sorted).
Dependencies: []string{
"k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
}
}
// schema_pkg_apis_pipeline_v1beta1_Task returns the OpenAPI schema for the
// v1beta1.Task resource (kind/apiVersion/metadata/spec). The description marks
// the whole type as deprecated in favor of v1.Task.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.\n\nDeprecated: Please use v1.Task instead.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type:        []string{"string"},
Format:      "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type:        []string{"string"},
Format:      "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the Task from the client",
Default:     map[string]interface{}{},
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskBreakpoints returns the OpenAPI schema
// for v1beta1.TaskBreakpoints: an optional "onFailure" string and an atomic
// "beforeSteps" string array. No refs, so no Dependencies entry is emitted.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskBreakpoints(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskBreakpoints defines the breakpoint config for a particular Task",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"onFailure": {
SchemaProps: spec.SchemaProps{
Description: "if enabled, pause TaskRun on failure of a step failed step will not exit",
Type:        []string{"string"},
Format:      "",
},
},
"beforeSteps": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskList returns the OpenAPI schema for
// v1beta1.TaskList, the standard Kubernetes list wrapper around Task; only
// "items" is required.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskList contains a list of Task",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type:        []string{"string"},
Format:      "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type:        []string{"string"},
Format:      "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRef returns the OpenAPI schema for
// v1beta1.TaskRef (name/kind/apiVersion plus the deprecated "bundle" field).
// All properties are plain strings, so there is no Dependencies list.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskRef can be used to refer to a specific instance of a task.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Type:        []string{"string"},
Format:      "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "TaskKind indicates the Kind of the Task: 1. Namespaced Task when Kind is set to \"Task\". If Kind is \"\", it defaults to \"Task\". 2. Custom Task when Kind is non-empty and APIVersion is non-empty",
Type:        []string{"string"},
Format:      "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "API version of the referent Note: A Task with non-empty APIVersion and Kind is considered a Custom Task",
Type:        []string{"string"},
Format:      "",
},
},
"bundle": {
SchemaProps: spec.SchemaProps{
Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead. The field is staying there for go client backward compatibility, but is not used/allowed anymore.",
Type:        []string{"string"},
Format:      "",
},
},
},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskResource returns the OpenAPI schema for
// v1beta1.TaskResource (deprecated PipelineResource declaration); "name" and
// "type" are required.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskResource defines an input or output Resource declared as a requirement by a Task. The Name field will be used to refer to these Resources within the Task definition, and when provided as an Input, the Name will be the path to the volume mounted containing this Resource as an input (e.g. an input Resource named `workspace` will be mounted at `/workspace`).\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name declares the name by which a resource is referenced in the definition. Resources may be referenced by name in the definition of a Task's steps.",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the type of this resource;",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the declared resource that may be used to populate a UI.",
Type:        []string{"string"},
Format:      "",
},
},
"targetPath": {
SchemaProps: spec.SchemaProps{
Description: "TargetPath is the path in workspace directory where the resource will be copied.",
Type:        []string{"string"},
Format:      "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Optional declares the resource as optional. By default optional is set to false which makes a resource required. optional: true - the resource is considered optional optional: false - the resource is considered required (equivalent of not specifying it)",
Type:        []string{"boolean"},
Format:      "",
},
},
},
Required: []string{"name", "type"},
},
},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding returns the OpenAPI
// schema for v1beta1.TaskResourceBinding (deprecated), binding a declared
// resource name to a PipelineResource ref or inline spec, plus "paths".
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskResourceBinding points to the PipelineResource that will be used for the Task input or output called Name.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of the PipelineResource in the Pipeline's declaration",
Type:        []string{"string"},
Format:      "",
},
},
"resourceRef": {
SchemaProps: spec.SchemaProps{
Description: "ResourceRef is a reference to the instance of the actual PipelineResource that should be used",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef"),
},
},
"resourceSpec": {
SchemaProps: spec.SchemaProps{
Description: "ResourceSpec is specification of a resource that should be created and consumed by the task",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec"),
},
},
"paths": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Paths will probably be removed in #1284, and then PipelineResourceBinding can be used instead. The optional Path field corresponds to a path on disk at which the Resource can be found (used when providing the resource via mounted volume, overriding the default logic to fetch the Resource).",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type:    []string{"string"},
Format:  "",
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef", "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskResources returns the OpenAPI schema for
// v1beta1.TaskResources (deprecated): atomic "inputs"/"outputs" arrays of
// TaskResource. The "Outputs" description saying "input PipelineResources" is
// copied verbatim from the Go type's doc comment; fix it at the type, not here.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskResources(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskResources allows a Pipeline to declare how its DeclaredPipelineResources should be provided to a Task as its inputs and outputs.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"inputs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Inputs holds the mapping from the PipelineResources declared in DeclaredPipelineResources to the input PipelineResources required by the Task.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource"),
},
},
},
},
},
"outputs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Outputs holds the mapping from the PipelineResources declared in DeclaredPipelineResources to the input PipelineResources required by the Task.",
Type:        []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskResult returns the OpenAPI schema for
// v1beta1.TaskResult: name (required), type, object "properties" map of
// PropertySpec, description, and a ParamValue "value" expression.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskResult used to describe the results of a task",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name the given name",
Default:     "",
Type:        []string{"string"},
Format:      "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.",
Type:        []string{"string"},
Format:      "",
},
},
"properties": {
SchemaProps: spec.SchemaProps{
Description: "Properties is the JSON Schema properties to support key-value pairs results.",
Type:        []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"),
},
},
},
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a human-readable description of the result",
Type:        []string{"string"},
Format:      "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value the expression used to retrieve the value of the result from an underlying Step.",
Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRun returns the OpenAPI schema for the
// v1beta1.TaskRun resource (kind/apiVersion/metadata/spec/status). The
// description marks the type as deprecated in favor of v1.TaskRun.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskRun(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.\n\nDeprecated: Please use v1.TaskRun instead.",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type:        []string{"string"},
Format:      "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type:        []string{"string"},
Format:      "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunDebug returns the OpenAPI schema for
// v1beta1.TaskRunDebug, a single optional "breakpoints" ref to TaskBreakpoints.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskRunDebug(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskRunDebug defines the breakpoint config for a particular TaskRun",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"breakpoints": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskBreakpoints"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskBreakpoints"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunInputs returns the OpenAPI schema for
// v1beta1.TaskRunInputs (deprecated): atomic "resources" (TaskResourceBinding)
// and "params" (Param) arrays.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskRunInputs(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskRunInputs holds the input values that this task was invoked with.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"resources": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding"),
},
},
},
},
},
"params": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunList returns the OpenAPI schema for
// v1beta1.TaskRunList, the standard Kubernetes list wrapper around TaskRun;
// only "items" is required.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskRunList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskRunList contains a list of TaskRun",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type:        []string{"string"},
Format:      "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type:        []string{"string"},
Format:      "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRun"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs returns the OpenAPI schema
// for v1beta1.TaskRunOutputs (deprecated): a single atomic "resources" array
// of TaskResourceBinding.
// NOTE(review): generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TaskRunOutputs holds the output values that this task was invoked with.\n\nDeprecated: Unused, preserved only for backwards compatibility",
Type:        []string{"object"},
Properties: map[string]spec.Schema{
"resources": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding"},
}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunResources returns the OpenAPI definition for
// v1beta1.TaskRunResources: a deprecated object with atomic "inputs" and "outputs"
// arrays of TaskResourceBinding $refs. NOTE(review): this looks like openapi-gen
// generated code — if the descriptions need fixing, fix the Go type comments and
// regenerate rather than hand-editing here.
func schema_pkg_apis_pipeline_v1beta1_TaskRunResources(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunResources allows a TaskRun to declare inputs and outputs TaskResourceBinding\n\nDeprecated: Unused, preserved only for backwards compatibility",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"inputs": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Inputs holds the inputs resources this task was invoked with",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding"),
									},
								},
							},
						},
					},
					"outputs": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							// NOTE(review): "Outputs holds the inputs resources" reads like a copy-paste
							// typo inherited from the source struct comment; confirm and fix at the Go
							// type, not in this generated string.
							Description: "Outputs holds the inputs resources this task was invoked with",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunResult returns the OpenAPI definition for
// v1beta1.TaskRunResult: a name/type/value object where "name" and "value" are
// required and "value" is a $ref to ParamValue.
// NOTE(review): the top-level description reads "TaskRunStepResult is a type alias of
// TaskRunResult" — presumably inherited from an alias comment on the Go type; confirm
// against the source struct before changing.
func schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
	props := map[string]spec.Schema{
		"name": {
			SchemaProps: spec.SchemaProps{
				Description: "Name the given name",
				Default:     "",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"type": {
			SchemaProps: spec.SchemaProps{
				Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"value": {
			SchemaProps: spec.SchemaProps{
				Description: "Value the given value of the result",
				Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStepResult is a type alias of TaskRunResult",
				Type:        []string{"object"},
				Properties:  props,
				Required:    []string{"name", "value"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunSidecarOverride returns the OpenAPI
// definition for v1beta1.TaskRunSidecarOverride: the required pair of a sidecar
// "name" and the "resources" (core/v1.ResourceRequirements) to apply to it.
func schema_pkg_apis_pipeline_v1beta1_TaskRunSidecarOverride(ref common.ReferenceCallback) common.OpenAPIDefinition {
	nameProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "The name of the Sidecar to override.",
			Default:     "",
			Type:        []string{"string"},
			Format:      "",
		},
	}
	resourcesProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "The resource requirements to apply to the Sidecar.",
			Default:     map[string]interface{}{},
			Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunSidecarOverride is used to override the values of a Sidecar in the corresponding Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name":      nameProp,
					"resources": resourcesProp,
				},
				Required: []string{"name", "resources"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.ResourceRequirements"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunSpec returns the OpenAPI definition for
// v1beta1.TaskRunSpec. Every property description below is emitted verbatim into the
// published OpenAPI document, and Dependencies enumerates each $ref target so the
// aggregator pulls those definitions in. NOTE(review): this looks like openapi-gen
// generated code — regenerate from the Go types rather than hand-editing.
func schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunSpec defines the desired state of TaskRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"debug": {
						SchemaProps: spec.SchemaProps{
							Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug"),
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
									},
								},
							},
						},
					},
					"resources": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: Unused, preserved only for backwards compatibility",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources"),
						},
					},
					"serviceAccountName": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					// "taskRef" and "taskSpec" are mutually exclusive per the description below.
					"taskRef": {
						SchemaProps: spec.SchemaProps{
							Description: "no more than one of the TaskRef and TaskSpec may be specified.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef"),
						},
					},
					"taskSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifying TaskSpec can be disabled by setting `disable-inline-spec` feature flag. See Task.spec (API version: tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Used for cancelling a TaskRun (and maybe more later on)",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"statusMessage": {
						SchemaProps: spec.SchemaProps{
							Description: "Status message for cancellation.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"retries": {
						SchemaProps: spec.SchemaProps{
							Description: "Retries represents how many times this TaskRun should be retried in the event of Task failure.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"timeout": {
						SchemaProps: spec.SchemaProps{
							Description: "Time after which one retry attempt times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
						},
					},
					"podTemplate": {
						SchemaProps: spec.SchemaProps{
							Description: "PodTemplate holds pod specific configuration",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"),
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Workspaces is a list of WorkspaceBindings from volumes to workspaces.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding"),
									},
								},
							},
						},
					},
					"stepOverrides": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Overrides to apply to Steps in this TaskRun. If a field is specified in both a Step and a StepOverride, the value from the StepOverride will be used. This field is only supported when the alpha feature gate is enabled.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride"),
									},
								},
							},
						},
					},
					"sidecarOverrides": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Overrides to apply to Sidecars in this TaskRun. If a field is specified in both a Sidecar and a SidecarOverride, the value from the SidecarOverride will be used. This field is only supported when the alpha feature gate is enabled.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride"),
									},
								},
							},
						},
					},
					"computeResources": {
						SchemaProps: spec.SchemaProps{
							Description: "Compute resources to use for this TaskRun",
							Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
						},
					},
					"managedBy": {
						SchemaProps: spec.SchemaProps{
							Description: "ManagedBy indicates which controller is responsible for reconciling this resource. If unset or set to \"tekton.dev/pipeline\", the default Tekton controller will manage this resource. This field is immutable.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunStatus returns the OpenAPI definition for
// v1beta1.TaskRunStatus. It combines knative duck-typed status fields
// (observedGeneration, conditions, annotations) with the Tekton-specific
// TaskRunStatusFields; only "podName" is required. NOTE(review): this looks like
// openapi-gen generated code — regenerate from the Go types rather than hand-editing.
func schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStatus defines the observed state of TaskRun",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"observedGeneration": {
						SchemaProps: spec.SchemaProps{
							Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"conditions": {
						// Strategic-merge-patch metadata: conditions are merged by their "type" key.
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "type",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Conditions the latest available observations of a resource's current state.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("knative.dev/pkg/apis.Condition"),
									},
								},
							},
						},
					},
					"annotations": {
						SchemaProps: spec.SchemaProps{
							Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"podName": {
						SchemaProps: spec.SchemaProps{
							Description: "PodName is the name of the pod responsible for executing this task's steps.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"startTime": {
						SchemaProps: spec.SchemaProps{
							Description: "StartTime is the time the build is actually started.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"completionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "CompletionTime is the time the build completed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"steps": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Steps describes the state of each build step container.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState"),
									},
								},
							},
						},
					},
					"cloudEvents": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "CloudEvents describe the state of each cloud event requested via a CloudEventResource.\n\nDeprecated: Removed in v0.44.0.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery"),
									},
								},
							},
						},
					},
					// Self-referential: each retry attempt stores a snapshot of this same type.
					"retriesStatus": {
						SchemaProps: spec.SchemaProps{
							// NOTE(review): "will have no date within" reads like a typo for "no data";
							// inherited from the source struct comment — confirm and fix there.
							Description: "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant. See TaskRun.status (API version: tekton.dev/v1beta1)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus"),
									},
								},
							},
						},
					},
					"resourcesResult": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results from Resources built during the TaskRun. This is tomb-stoned along with the removal of pipelineResources Deprecated: this field is not populated and is preserved only for backwards compatibility",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/result.RunResult"),
									},
								},
							},
						},
					},
					"taskResults": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "TaskRunResults are the list of results written out by the task's containers",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult"),
									},
								},
							},
						},
					},
					"sidecars": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							// NOTE(review): "is represents" is a grammar slip inherited from the source
							// struct comment; fix at the Go type and regenerate.
							Description: "The list has one entry per sidecar in the manifest. Each entry is represents the imageid of the corresponding sidecar.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState"),
									},
								},
							},
						},
					},
					"taskSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun. See Task.spec (API version tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec"),
						},
					},
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance"),
						},
					},
					"spanContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SpanContext contains tracing span context fields",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"podName"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/result.RunResult", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields returns the OpenAPI definition
// for v1beta1.TaskRunStatusFields — the Tekton-specific portion of TaskRunStatus,
// defined separately so other types can consume it via duck typing. The properties
// here mirror the same-named properties of the TaskRunStatus schema above, minus the
// knative duck fields. NOTE(review): openapi-gen generated code — regenerate rather
// than hand-editing.
func schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStatusFields holds the fields of TaskRun's status. This is defined separately and inlined so that other types can readily consume these fields via duck typing.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"podName": {
						SchemaProps: spec.SchemaProps{
							Description: "PodName is the name of the pod responsible for executing this task's steps.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"startTime": {
						SchemaProps: spec.SchemaProps{
							Description: "StartTime is the time the build is actually started.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"completionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "CompletionTime is the time the build completed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"steps": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Steps describes the state of each build step container.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState"),
									},
								},
							},
						},
					},
					"cloudEvents": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "CloudEvents describe the state of each cloud event requested via a CloudEventResource.\n\nDeprecated: Removed in v0.44.0.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery"),
									},
								},
							},
						},
					},
					// Each retry snapshot is a full TaskRunStatus (see that schema's NOTE about
					// the "no date within" wording).
					"retriesStatus": {
						SchemaProps: spec.SchemaProps{
							Description: "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant. See TaskRun.status (API version: tekton.dev/v1beta1)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus"),
									},
								},
							},
						},
					},
					"resourcesResult": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results from Resources built during the TaskRun. This is tomb-stoned along with the removal of pipelineResources Deprecated: this field is not populated and is preserved only for backwards compatibility",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/result.RunResult"),
									},
								},
							},
						},
					},
					"taskResults": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "TaskRunResults are the list of results written out by the task's containers",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult"),
									},
								},
							},
						},
					},
					"sidecars": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "The list has one entry per sidecar in the manifest. Each entry is represents the imageid of the corresponding sidecar.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState"),
									},
								},
							},
						},
					},
					"taskSpec": {
						SchemaProps: spec.SchemaProps{
							Description: "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun. See Task.spec (API version tekton.dev/v1beta1)",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec"),
						},
					},
					"provenance": {
						SchemaProps: spec.SchemaProps{
							Description: "Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance"),
						},
					},
					"spanContext": {
						SchemaProps: spec.SchemaProps{
							Description: "SpanContext contains tracing span context fields",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"podName"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/result.RunResult", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TaskRunStepOverride returns the OpenAPI definition
// for v1beta1.TaskRunStepOverride: the required pair of a step "name" and the
// "resources" (core/v1.ResourceRequirements) to apply to it. Mirrors the
// TaskRunSidecarOverride schema.
func schema_pkg_apis_pipeline_v1beta1_TaskRunStepOverride(ref common.ReferenceCallback) common.OpenAPIDefinition {
	nameProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "The name of the Step to override.",
			Default:     "",
			Type:        []string{"string"},
			Format:      "",
		},
	}
	resourcesProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "The resource requirements to apply to the Step.",
			Default:     map[string]interface{}{},
			Ref:         ref("k8s.io/api/core/v1.ResourceRequirements"),
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskRunStepOverride is used to override the values of a Step in the corresponding Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name":      nameProp,
					"resources": resourcesProp,
				},
				Required: []string{"name", "resources"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.ResourceRequirements"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TaskSpec returns the OpenAPI definition for
// v1beta1.TaskSpec: the Task body — params, steps, volumes, step template, sidecars,
// workspaces, and declared results — plus the deprecated "resources" field.
// NOTE(review): openapi-gen generated code — regenerate from the Go types rather
// than hand-editing.
func schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TaskSpec defines the desired state of Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"resources": {
						SchemaProps: spec.SchemaProps{
							Description: "Resources is a list input and output resource to run the task Resources are represented in TaskRuns as bindings to instances of PipelineResources.\n\nDeprecated: Unused, preserved only for backwards compatibility",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources"),
						},
					},
					"params": {
						SchemaProps: spec.SchemaProps{
							Description: "Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec"),
									},
								},
							},
						},
					},
					"displayName": {
						SchemaProps: spec.SchemaProps{
							Description: "DisplayName is a user-facing name of the task that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is a user-facing description of the task that may be used to populate a UI.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"steps": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step"),
									},
								},
							},
						},
					},
					// "volumes" carries no x-kubernetes-list-type extension, unlike the other
					// list properties in this schema.
					"volumes": {
						SchemaProps: spec.SchemaProps{
							Description: "Volumes is a collection of volumes that are available to mount into the steps of the build. See Pod.spec.volumes (API version: v1)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/core/v1.Volume"),
									},
								},
							},
						},
					},
					"stepTemplate": {
						SchemaProps: spec.SchemaProps{
							Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate"),
						},
					},
					"sidecars": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar"),
									},
								},
							},
						},
					},
					"workspaces": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Workspaces are the volumes that this Task requires.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration"),
									},
								},
							},
						},
					},
					"results": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Results are values that this Task can output",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_TimeoutFields returns the OpenAPI definition for
// v1beta1.TimeoutFields: three optional Duration-valued properties ("pipeline",
// "tasks", "finally") giving granular timeouts.
func schema_pkg_apis_pipeline_v1beta1_TimeoutFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// durationProp builds a property schema that is a described $ref to meta/v1.Duration.
	durationProp := func(desc string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TimeoutFields allows granular specification of pipeline, task, and finally timeouts",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"pipeline": durationProp("Pipeline sets the maximum allowed duration for execution of the entire pipeline. The sum of individual timeouts for tasks and finally must not exceed this value."),
					"tasks":    durationProp("Tasks sets the maximum allowed duration of this pipeline's tasks"),
					"finally":  durationProp("Finally sets the maximum allowed duration of this pipeline's finally"),
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_WhenExpression returns the OpenAPI definition for
// v1beta1.WhenExpression: "input"/"operator"/"cel" strings plus an atomic "values"
// string array. No $refs are used, so no Dependencies are declared.
func schema_pkg_apis_pipeline_v1beta1_WhenExpression(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// stringProp builds a described plain-string property schema.
	stringProp := func(desc string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Type:        []string{"string"},
				Format:      "",
			},
		}
	}
	valuesProp := spec.Schema{
		VendorExtensible: spec.VendorExtensible{
			Extensions: spec.Extensions{
				"x-kubernetes-list-type": "atomic",
			},
		},
		SchemaProps: spec.SchemaProps{
			Description: "Values is an array of strings, which is compared against the input, for guard checking It must be non-empty",
			Type:        []string{"array"},
			Items: &spec.SchemaOrArray{
				Schema: &spec.Schema{
					SchemaProps: spec.SchemaProps{
						Default: "",
						Type:    []string{"string"},
						Format:  "",
					},
				},
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WhenExpression allows a PipelineTask to declare expressions to be evaluated before the Task is run to determine whether the Task should be executed or skipped",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"input":    stringProp("Input is the string for guard checking which can be a static input or an output from a parent Task"),
					"operator": stringProp("Operator that represents an Input's relationship to the values"),
					"values":   valuesProp,
					"cel":      stringProp("CEL is a string of Common Language Expression, which can be used to conditionally execute the task based on the result of the expression evaluation More info about CEL syntax: https://github.com/google/cel-spec/blob/master/doc/langdef.md"),
				},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding returns the OpenAPI definition
// for v1beta1.WorkspaceBinding: a required "name", optional "subPath", and one
// volume-source property per supported backing type (PVC template/ref, emptyDir,
// configMap, secret, projected, CSI).
func schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// refProp builds a property schema that is a described $ref to another definition.
	refProp := func(desc, target string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Ref:         ref(target),
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspaceBinding maps a Task's declared workspace to a Volume.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of the workspace populated by the volume.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"subPath": {
						SchemaProps: spec.SchemaProps{
							Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"volumeClaimTemplate":   refProp("VolumeClaimTemplate is a template for a claim that will be created in the same namespace. The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun. See PersistentVolumeClaim (API version: v1)", "k8s.io/api/core/v1.PersistentVolumeClaim"),
					"persistentVolumeClaim": refProp("PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
					"emptyDir":              refProp("EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", "k8s.io/api/core/v1.EmptyDirVolumeSource"),
					"configMap":             refProp("ConfigMap represents a configMap that should populate this workspace.", "k8s.io/api/core/v1.ConfigMapVolumeSource"),
					"secret":                refProp("Secret represents a secret that should populate this workspace.", "k8s.io/api/core/v1.SecretVolumeSource"),
					"projected":             refProp("Projected represents a projected volume that should populate this workspace.", "k8s.io/api/core/v1.ProjectedVolumeSource"),
					"csi":                   refProp("CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", "k8s.io/api/core/v1.CSIVolumeSource"),
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"},
	}
}
// schema_pkg_apis_pipeline_v1beta1_WorkspaceDeclaration returns the OpenAPI
// definition for v1beta1.WorkspaceDeclaration (a volume a Task requires);
// "name" is the only required property. Looks like kube-openapi generated
// code — do not edit by hand; regenerate instead (TODO confirm generator).
func schema_pkg_apis_pipeline_v1beta1_WorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspaceDeclaration is a declaration of a volume that a Task requires.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name by which you can bind the volume at runtime.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "Description is an optional human readable description of this volume.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"mountPath": {
						SchemaProps: spec.SchemaProps{
							Description: "MountPath overrides the directory that the volume will be made available at.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"readOnly": {
						SchemaProps: spec.SchemaProps{
							Description: "ReadOnly dictates whether a mounted volume is writable. By default this field is false and so mounted volumes are writable.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"optional": {
						SchemaProps: spec.SchemaProps{
							Description: "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
				},
				Required: []string{"name"},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding returns the
// OpenAPI definition for v1beta1.WorkspacePipelineTaskBinding, which maps a
// pipeline workspace onto a task's declared workspace; only "name" is
// required. Appears to be generated code — regenerate rather than hand-edit.
func schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of the workspace as declared by the task",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"workspace": {
						SchemaProps: spec.SchemaProps{
							Description: "Workspace is the name of the workspace declared by the pipeline",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"subPath": {
						SchemaProps: spec.SchemaProps{
							Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name"},
			},
		},
	}
}
// schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage returns the OpenAPI
// definition for v1beta1.WorkspaceUsage (a Step/Sidecar's request for
// isolated access to a Task workspace). Note both "name" and "mountPath"
// are required here, unlike the other workspace schemas.
func schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name is the name of the workspace this Step or Sidecar wants access to.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"mountPath": {
						SchemaProps: spec.SchemaProps{
							Description: "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "mountPath"},
			},
		},
	}
}
// schema_pkg_apis_resolution_v1beta1_ResolutionRequest returns the OpenAPI
// definition for resolution/v1beta1.ResolutionRequest, a standard
// kind/apiVersion/metadata/spec/status CRD shape. The Dependencies list
// names the referenced definitions so the aggregator can link them.
func schema_pkg_apis_resolution_v1beta1_ResolutionRequest(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResolutionRequest is an object for requesting the content of a Tekton resource like a pipeline.yaml.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "Spec holds the information for the request part of the resource request.",
							Default:     map[string]interface{}{},
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Status communicates the state of the request and, ultimately, the content of the resolved resource.",
							Default:     map[string]interface{}{},
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestSpec", "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequestStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_resolution_v1beta1_ResolutionRequestList returns the
// OpenAPI definition for the list type wrapping ResolutionRequest items;
// "items" is required, per the usual Kubernetes List convention.
func schema_pkg_apis_resolution_v1beta1_ResolutionRequestList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResolutionRequestList is a list of ResolutionRequests.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequest"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1.ResolutionRequest", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec returns the
// OpenAPI definition for ResolutionRequestSpec: an atomic list of resolver
// params (x-kubernetes-list-type) plus an alpha-level "url" field.
func schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResolutionRequestSpec are all the fields in the spec of the ResolutionRequest CRD.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"params": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Parameters are the runtime attributes passed to the resolver to help it figure out how to resolve the resource being requested. For example: repo URL, commit SHA, path to file, the kind of authentication to leverage, etc.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"),
									},
								},
							},
						},
					},
					"url": {
						SchemaProps: spec.SchemaProps{
							Description: "URL is the runtime url passed to the resolver to help it figure out how to resolver the resource being requested. This is currently at an ALPHA stability level and subject to alpha API compatibility policies.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"},
	}
}
// schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus returns the
// OpenAPI definition for ResolutionRequestStatus: knative duck-typed status
// (observedGeneration/conditions/annotations) plus the resolution-specific
// fields. Note "source" is deprecated in favor of "refSource" but both are
// still listed as required.
func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResolutionRequestStatus are all the fields in a ResolutionRequest's status subresource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"observedGeneration": {
						SchemaProps: spec.SchemaProps{
							Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"conditions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "type",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Conditions the latest available observations of a resource's current state.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("knative.dev/pkg/apis.Condition"),
									},
								},
							},
						},
					},
					"annotations": {
						SchemaProps: spec.SchemaProps{
							Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"data": {
						SchemaProps: spec.SchemaProps{
							Description: "Data is a string representation of the resolved content of the requested resource in-lined into the ResolutionRequest object.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"source": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: Use RefSource instead",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"),
						},
					},
					"refSource": {
						SchemaProps: spec.SchemaProps{
							Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"),
						},
					},
				},
				Required: []string{"data", "source", "refSource"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource", "knative.dev/pkg/apis.Condition"},
	}
}
// schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields returns
// the OpenAPI definition for the resolution-specific status fields embedded
// in ResolutionRequestStatus (data, deprecated source, and refSource).
func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResolutionRequestStatusFields are the ResolutionRequest-specific fields for the status subresource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"data": {
						SchemaProps: spec.SchemaProps{
							Description: "Data is a string representation of the resolved content of the requested resource in-lined into the ResolutionRequest object.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"source": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: Use RefSource instead",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"),
						},
					},
					"refSource": {
						SchemaProps: spec.SchemaProps{
							Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.",
							Ref:         ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"),
						},
					},
				},
				Required: []string{"data", "source", "refSource"},
			},
		},
		Dependencies: []string{
			"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"},
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo copies this v1beta1 ParamSpec into the v1 sink, defaulting an
// unset Type to "string" and deep-copying the Properties map and Default.
func (p ParamSpec) convertTo(ctx context.Context, sink *v1.ParamSpec) {
	sink.Name = p.Name
	sink.Type = v1.ParamType(ParamTypeString)
	if p.Type != "" {
		sink.Type = v1.ParamType(p.Type)
	}
	sink.Description = p.Description
	sink.Enum = p.Enum
	// Preserve nil-ness: only allocate when the source map is non-nil.
	var props map[string]v1.PropertySpec
	if p.Properties != nil {
		props = make(map[string]v1.PropertySpec, len(p.Properties))
	}
	for name, prop := range p.Properties {
		props[name] = v1.PropertySpec{Type: v1.ParamType(prop.Type)}
	}
	sink.Properties = props
	if d := p.Default; d != nil {
		sink.Default = &v1.ParamValue{
			Type:      v1.ParamType(d.Type),
			StringVal: d.StringVal,
			ArrayVal:  d.ArrayVal,
			ObjectVal: d.ObjectVal,
		}
	}
}
// convertFrom populates this v1beta1 ParamSpec from the v1 source, defaulting
// an unset Type to "string" and deep-copying the Properties map and Default.
func (p *ParamSpec) convertFrom(ctx context.Context, source v1.ParamSpec) {
	p.Name = source.Name
	p.Type = ParamTypeString
	if source.Type != "" {
		p.Type = ParamType(source.Type)
	}
	p.Description = source.Description
	p.Enum = source.Enum
	// Preserve nil-ness: only allocate when the source map is non-nil.
	var props map[string]PropertySpec
	if source.Properties != nil {
		props = make(map[string]PropertySpec, len(source.Properties))
	}
	for name, prop := range source.Properties {
		props[name] = PropertySpec{Type: ParamType(prop.Type)}
	}
	p.Properties = props
	if d := source.Default; d != nil {
		p.Default = &ParamValue{
			Type:      ParamType(d.Type),
			StringVal: d.StringVal,
			ArrayVal:  d.ArrayVal,
			ObjectVal: d.ObjectVal,
		}
	}
}
// convertTo converts this v1beta1 Param into the v1 sink Param.
func (p Param) convertTo(ctx context.Context, sink *v1.Param) {
	sink.Name = p.Name
	var converted v1.ParamValue
	p.Value.convertTo(ctx, &converted)
	sink.Value = converted
}
// ConvertFrom converts v1beta1 Param from v1 Param
func (p *Param) ConvertFrom(ctx context.Context, source v1.Param) {
	p.Name = source.Name
	var converted ParamValue
	converted.convertFrom(ctx, source.Value)
	p.Value = converted
}
// convertTo copies this v1beta1 ParamValue into the v1 sink, defaulting an
// unset Type to "string".
func (v ParamValue) convertTo(ctx context.Context, sink *v1.ParamValue) {
	sink.Type = v1.ParamType(ParamTypeString)
	if v.Type != "" {
		sink.Type = v1.ParamType(v.Type)
	}
	sink.StringVal = v.StringVal
	sink.ArrayVal = v.ArrayVal
	sink.ObjectVal = v.ObjectVal
}
// convertFrom populates this v1beta1 ParamValue from the v1 source,
// defaulting an unset Type to "string".
func (v *ParamValue) convertFrom(ctx context.Context, source v1.ParamValue) {
	v.Type = ParamTypeString
	if source.Type != "" {
		v.Type = ParamType(source.Type)
	}
	v.StringVal = source.StringVal
	v.ArrayVal = source.ArrayVal
	v.ObjectVal = source.ObjectVal
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/substitution"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
)
// ParamsPrefix is the prefix used in $(...) expressions referring to
// parameters, e.g. "$(params.foo)".
const ParamsPrefix = "params"
// ParamSpec defines arbitrary parameters needed beyond typed inputs (such as
// resources). Parameter values are provided by users as inputs on a TaskRun
// or PipelineRun.
//
// NOTE: the +kubebuilder markers below drive CRD generation; keep them intact.
type ParamSpec struct {
	// Name declares the name by which a parameter is referenced.
	Name string `json:"name"`
	// Type is the user-specified type of the parameter. The possible types
	// are currently "string", "array" and "object", and "string" is the default.
	// When left empty it is inferred in SetDefaults from Properties/Default.
	// +optional
	Type ParamType `json:"type,omitempty"`
	// Description is a user-facing description of the parameter that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`
	// Properties is the JSON Schema properties to support key-value pairs parameter.
	// +optional
	Properties map[string]PropertySpec `json:"properties,omitempty"`
	// Default is the value a parameter takes if no input value is supplied. If
	// default is set, a Task may be executed without a supplied value for the
	// parameter.
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Default *ParamValue `json:"default,omitempty"`
	// Enum declares a set of allowed param input values for tasks/pipelines that can be validated.
	// If Enum is not set, no input validation is performed for the param.
	// Only valid for string-typed params; see validateParamEnums.
	// +optional
	Enum []string `json:"enum,omitempty"`
}
// ParamSpecs is a list of ParamSpec declarations.
// +listType=atomic
type ParamSpecs []ParamSpec
// PropertySpec defines the schema of a single key of an object parameter.
type PropertySpec struct {
	// Type of the property; defaults to "string" when unset
	// (see setDefaultsForProperties).
	Type ParamType `json:"type,omitempty"`
}
// SetDefaults infers and sets the default type of the parameter.
//
// Precedence for inferring the type: an explicit Type wins; otherwise a
// `properties` section implies "object"; otherwise the shape of `default`
// decides; with nothing to go on, "string" is the fallback. Whenever a
// `properties` section exists, each property also gets a default type.
func (pp *ParamSpec) SetDefaults(context.Context) {
	if pp == nil {
		return
	}
	if pp.Type != "" {
		// Author-provided type: nothing to infer, but property types may
		// still need defaulting.
		pp.setDefaultsForProperties()
		return
	}
	if pp.Properties != nil {
		pp.Type = ParamTypeObject
		pp.setDefaultsForProperties()
		return
	}
	if pp.Default == nil {
		// No type can be inferred from a default value; fall back to string.
		pp.Type = ParamTypeString
		return
	}
	switch {
	case pp.Default.Type != "":
		pp.Type = pp.Default.Type
	case pp.Default.ArrayVal != nil:
		pp.Type = ParamTypeArray
	case pp.Default.ObjectVal != nil:
		pp.Type = ParamTypeObject
	default:
		pp.Type = ParamTypeString
	}
}
// getNames returns the names of all declared parameters, in declaration order.
func (ps ParamSpecs) getNames() []string {
	var names []string
	for i := range ps {
		names = append(names, ps[i].Name)
	}
	return names
}
// sortByType partitions the params into (string, array, object) groups,
// returned in that order. Params with an unrecognized type are grouped with
// the strings.
func (ps ParamSpecs) sortByType() (ParamSpecs, ParamSpecs, ParamSpecs) {
	var strs, arrs, objs ParamSpecs
	for _, param := range ps {
		switch param.Type {
		case ParamTypeArray:
			arrs = append(arrs, param)
		case ParamTypeObject:
			objs = append(objs, param)
		default: // ParamTypeString and anything unrecognized
			strs = append(strs, param)
		}
	}
	return strs, arrs, objs
}
// validateNoDuplicateNames returns a FieldError naming every parameter that
// is declared more than once, or nil when all names are unique.
func (ps ParamSpecs) validateNoDuplicateNames() *apis.FieldError {
	var errs *apis.FieldError
	for dup := range findDups(ps.getNames()) {
		errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", dup))
	}
	return errs
}
// validateParamEnums validates the enum declarations on a list of ParamSpecs:
// the enable-param-enum feature flag must be on, enums are only allowed on
// string-typed params, enum values must be unique, and a default value (if
// set) must be a member of the enum.
func (ps ParamSpecs) validateParamEnums(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	for _, p := range ps {
		if len(p.Enum) == 0 {
			continue
		}
		if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableParamEnum {
			// Bug fix: this previously read `errs.Also(errs, ...)`, which
			// re-appended the accumulator to itself and duplicated every
			// previously collected error in the returned FieldError.
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag `%s` should be set to true to use Enum", config.EnableParamEnum), "").ViaKey(p.Name))
		}
		if p.Type != ParamTypeString {
			errs = errs.Also(apis.ErrGeneric("enum can only be set with string type param", "").ViaKey(p.Name))
		}
		for dup := range findDups(p.Enum) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("parameter enum value %v appears more than once", dup), "").ViaKey(p.Name))
		}
		if p.Default != nil && p.Default.StringVal != "" {
			if !slices.Contains(p.Enum, p.Default.StringVal) {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("param default value %v not in the enum list", p.Default.StringVal), "").ViaKey(p.Name))
			}
		}
	}
	return errs
}
// findDups returns the set of values that occur more than once in vals.
func findDups(vals []string) sets.String {
	seen, dups := sets.NewString(), sets.NewString()
	for _, v := range vals {
		if seen.Has(v) {
			dups.Insert(v)
		}
		seen.Insert(v)
	}
	return dups
}
// setDefaultsForProperties fills in the default type ("string") for every
// PropertySpec that does not declare one.
func (pp *ParamSpec) setDefaultsForProperties() {
	for key, prop := range pp.Properties {
		if prop.Type != "" {
			continue
		}
		pp.Properties[key] = PropertySpec{Type: ParamTypeString}
	}
}
// Param declares a ParamValue to use for the parameter called Name.
type Param struct {
	// Name of the parameter being supplied.
	Name string `json:"name"`
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ParamValue `json:"value"`
}
// Params is a list of Param values supplied on a TaskRun or PipelineRun.
// +listType=atomic
type Params []Param
// ExtractNames returns the set of unique parameter names in ps.
func (ps Params) ExtractNames() sets.String {
	names := sets.NewString()
	for i := range ps {
		names.Insert(ps[i].Name)
	}
	return names
}
// extractValues flattens every param value into one string slice: each
// StringVal, every ArrayVal element, and every ObjectVal value.
func (ps Params) extractValues() []string {
	vals := []string{}
	for _, p := range ps {
		vals = append(vals, p.Value.StringVal)
		vals = append(vals, p.Value.ArrayVal...)
		for _, objVal := range p.Value.ObjectVal {
			vals = append(vals, objVal)
		}
	}
	return vals
}
// extractParamMapArrVals creates a param map with the key: param.Name and
// val: param.Value.ArrayVal
func (ps Params) extractParamMapArrVals() map[string][]string {
	arrVals := make(map[string][]string, len(ps))
	for i := range ps {
		arrVals[ps[i].Name] = ps[i].Value.ArrayVal
	}
	return arrVals
}
// ExtractParamArrayLengths extract and return the lengths of all array params
// Example of returned value: {"a-array-params": 2,"b-array-params": 2 }
func (ps Params) ExtractParamArrayLengths() map[string]int {
	lengths := make(map[string]int)
	for _, p := range ps {
		if p.Value.Type != ParamTypeArray {
			continue
		}
		lengths[p.Name] = len(p.Value.ArrayVal)
	}
	return lengths
}
// validateDuplicateParameters returns a FieldError for every parameter whose
// name was already seen earlier in the list, or nil when all are unique.
func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) {
	seen := sets.NewString()
	for idx, p := range ps {
		if seen.Has(p.Name) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("parameter names must be unique,"+
				" the parameter \"%s\" is also defined at", p.Name), fmt.Sprintf("[%d].name", idx)))
		}
		seen.Insert(p.Name)
	}
	return errs
}
// ReplaceVariables applies string, array and object replacements to variables
// in Params, returning a deep copy and leaving the receiver untouched.
func (ps Params) ReplaceVariables(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) Params {
	out := ps.DeepCopy()
	for i := range out {
		out[i].Value.ApplyReplacements(stringReplacements, arrayReplacements, objectReplacements)
	}
	return out
}
// ExtractDefaultParamArrayLengths extract and return the lengths of all array param defaults
// Example of returned value: {"a-array-params": 2,"b-array-params": 2 }
func (ps ParamSpecs) ExtractDefaultParamArrayLengths() map[string]int {
	lengths := make(map[string]int)
	for _, p := range ps {
		if p.Default == nil || p.Default.Type != ParamTypeArray {
			continue
		}
		lengths[p.Name] = len(p.Default.ArrayVal)
	}
	return lengths
}
// extractArrayIndexingParamRefs takes a string of the form
// `foo-$(params.array-param[1])-bar` and extracts the portions of the string
// that reference an element in an array param. For example, for
// `foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`,
// it returns ["$(params.array-param[1])", "$(params.other-array-param[2])"].
func extractArrayIndexingParamRefs(paramReference string) []string {
	refs := []string{}
	for _, expr := range substitution.ExtractArrayIndexingParamsExpressions(paramReference) {
		// Keep only expressions that actually carry an index.
		if substitution.ExtractIndexString(expr) != "" {
			refs = append(refs, expr)
		}
	}
	return refs
}
// extractParamRefsFromSteps collects every string in the given steps (script
// plus all container fields) that may contain array-indexing param references.
func extractParamRefsFromSteps(steps []Step) []string {
	refs := []string{}
	for i := range steps {
		refs = append(refs, steps[i].Script)
		refs = append(refs, extractParamRefsFromContainer(steps[i].ToK8sContainer())...)
	}
	return refs
}
// extractParamRefsFromStepTemplate collects every container-field string in
// the step template that may contain array-indexing param references; a nil
// template yields nil.
func extractParamRefsFromStepTemplate(stepTemplate *StepTemplate) []string {
	if stepTemplate == nil {
		return nil
	}
	return extractParamRefsFromContainer(stepTemplate.ToK8sContainer())
}
// extractParamRefsFromSidecars collects every string in the given sidecars
// (script plus all container fields) that may contain array-indexing param
// references.
func extractParamRefsFromSidecars(sidecars []Sidecar) []string {
	refs := []string{}
	for i := range sidecars {
		refs = append(refs, sidecars[i].Script)
		refs = append(refs, extractParamRefsFromContainer(sidecars[i].ToK8sContainer())...)
	}
	return refs
}
// extractParamRefsFromVolumes collects every user-supplied string from the
// volumes (names, config map / secret keys and paths, PVC claim names,
// projected sources, CSI attributes) that may contain array-indexing param
// references.
func extractParamRefsFromVolumes(volumes []corev1.Volume) []string {
	refs := []string{}
	for i := range volumes {
		vol := &volumes[i]
		refs = append(refs, vol.Name)
		if cm := vol.ConfigMap; cm != nil {
			refs = append(refs, cm.Name)
			for _, item := range cm.Items {
				refs = append(refs, item.Key, item.Path)
			}
		}
		if sec := vol.Secret; sec != nil {
			refs = append(refs, sec.SecretName)
			for _, item := range sec.Items {
				refs = append(refs, item.Key, item.Path)
			}
		}
		if pvc := vol.PersistentVolumeClaim; pvc != nil {
			refs = append(refs, pvc.ClaimName)
		}
		if proj := vol.Projected; proj != nil {
			for _, src := range proj.Sources {
				if src.ConfigMap != nil {
					refs = append(refs, src.ConfigMap.Name)
				}
				if src.Secret != nil {
					refs = append(refs, src.Secret.Name)
				}
				if src.ServiceAccountToken != nil {
					refs = append(refs, src.ServiceAccountToken.Audience)
				}
			}
		}
		if csi := vol.CSI; csi != nil {
			if csi.NodePublishSecretRef != nil {
				refs = append(refs, csi.NodePublishSecretRef.Name)
			}
			for _, attr := range csi.VolumeAttributes {
				refs = append(refs, attr)
			}
		}
	}
	return refs
}
// extractParamRefsFromContainer collects every user-supplied string field of
// the container (name, image, args, env, envFrom, command, volume mounts,
// etc.) that may contain array-indexing param references.
func extractParamRefsFromContainer(c *corev1.Container) []string {
	refs := []string{c.Name, c.Image, string(c.ImagePullPolicy)}
	refs = append(refs, c.Args...)
	for _, env := range c.Env {
		refs = append(refs, env.Value)
		if vf := env.ValueFrom; vf != nil {
			if vf.SecretKeyRef != nil {
				refs = append(refs, vf.SecretKeyRef.LocalObjectReference.Name, vf.SecretKeyRef.Key)
			}
			if vf.ConfigMapKeyRef != nil {
				refs = append(refs, vf.ConfigMapKeyRef.LocalObjectReference.Name, vf.ConfigMapKeyRef.Key)
			}
		}
	}
	for _, src := range c.EnvFrom {
		refs = append(refs, src.Prefix)
		if src.ConfigMapRef != nil {
			refs = append(refs, src.ConfigMapRef.LocalObjectReference.Name)
		}
		if src.SecretRef != nil {
			refs = append(refs, src.SecretRef.LocalObjectReference.Name)
		}
	}
	refs = append(refs, c.WorkingDir)
	refs = append(refs, c.Command...)
	for _, vm := range c.VolumeMounts {
		refs = append(refs, vm.Name, vm.MountPath, vm.SubPath)
	}
	return refs
}
// ParamType indicates the type of an input parameter;
// Used to distinguish between a single string and an array of strings.
type ParamType string

// Valid ParamTypes:
const (
	ParamTypeString ParamType = "string" // the default when no type is declared or inferable (see SetDefaults)
	ParamTypeArray  ParamType = "array"
	ParamTypeObject ParamType = "object"
)

// AllParamTypes can be used for ParamType validation.
var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject}
// ParamValues is modeled after IntOrString in kubernetes/apimachinery:
// ParamValue is a type that can hold a single string or string array.
// Used in JSON unmarshalling so that a single JSON field can accept
// either an individual string or an array of strings.
type ParamValue struct {
	// Type records which of the three value fields below is populated.
	Type      ParamType
	StringVal string
	// +listType=atomic
	ArrayVal  []string
	ObjectVal map[string]string
}

// ArrayOrString is deprecated, this is to keep backward compatibility
//
// Deprecated: Use ParamValue instead.
type ArrayOrString = ParamValue
// UnmarshalJSON implements the json.Unmarshaller interface.
func (paramValues *ParamValue) UnmarshalJSON(value []byte) error {
// ParamValues is used for Results Value as well, the results can be any kind of
// data so we need to check if it is empty.
if len(value) == 0 {
paramValues.Type = ParamTypeString
return nil
}
if value[0] == '[' {
// We're trying to Unmarshal to []string, but for cases like []int or other types
// of nested array which we don't support yet, we should continue and Unmarshal
// it to String. If the Type being set doesn't match what it actually should be,
// it will be captured by validation in reconciler.
// if failed to unmarshal to array, we will convert the value to string and marshal it to string
var a []string
if err := json.Unmarshal(value, &a); err == nil {
paramValues.Type = ParamTypeArray
paramValues.ArrayVal = a
return nil
}
}
if value[0] == '{' {
// if failed to unmarshal to map, we will convert the value to string and marshal it to string
var m map[string]string
if err := json.Unmarshal(value, &m); err == nil {
paramValues.Type = ParamTypeObject
paramValues.ObjectVal = m
return nil
}
}
// By default we unmarshal to string
paramValues.Type = ParamTypeString
if err := json.Unmarshal(value, ¶mValues.StringVal); err == nil {
return nil
}
paramValues.StringVal = string(value)
return nil
}
// MarshalJSON implements the json.Marshaller interface: the stored Type
// selects which of the three value fields is serialized.
func (paramValues ParamValue) MarshalJSON() ([]byte, error) {
	switch paramValues.Type {
	case ParamTypeString:
		return json.Marshal(paramValues.StringVal)
	case ParamTypeArray:
		return json.Marshal(paramValues.ArrayVal)
	case ParamTypeObject:
		return json.Marshal(paramValues.ObjectVal)
	}
	return []byte{}, fmt.Errorf("impossible ParamValues.Type: %q", paramValues.Type)
}
// ApplyReplacements applies the given string, array, and object replacements
// to this ParamValue in place, dispatching on its stored Type.
func (paramValues *ParamValue) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) {
	switch paramValues.Type {
	case ParamTypeArray:
		replaced := []string{}
		for _, elem := range paramValues.ArrayVal {
			replaced = append(replaced, substitution.ApplyArrayReplacements(elem, stringReplacements, arrayReplacements)...)
		}
		paramValues.ArrayVal = replaced
	case ParamTypeObject:
		replaced := map[string]string{}
		for key, val := range paramValues.ObjectVal {
			replaced[key] = substitution.ApplyReplacements(val, stringReplacements)
		}
		paramValues.ObjectVal = replaced
	default:
		// ParamTypeString and anything unrecognized: the value may itself be
		// a reference to an array/object param, so the type may be corrected.
		paramValues.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements)
	}
}
// applyOrCorrect deals with a string param whose value can be a string literal
// or a reference to a string/array/object param/result.
// If the value of paramValues is a reference to an array or object, the type
// will be corrected from string to array/object.
func (paramValues *ParamValue) applyOrCorrect(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) {
	stringVal := paramValues.StringVal
	// If the stringVal is a string literal, or a string mixed with variable
	// references, just do normal string replacement.
	if !exactVariableSubstitutionRegex.MatchString(stringVal) {
		paramValues.StringVal = substitution.ApplyReplacements(paramValues.StringVal, stringReplacements)
		return
	}
	// Trim the head "$(" and the tail ")" or "[*])",
	// i.e. get "params.name" from "$(params.name)" or "$(params.name[*])".
	trimedStringVal := substitution.StripStarVarSubExpression(stringVal)
	// Case 1: the stringVal is a reference to a string param.
	if _, ok := stringReplacements[trimedStringVal]; ok {
		paramValues.StringVal = substitution.ApplyReplacements(paramValues.StringVal, stringReplacements)
	}
	// Case 2: the stringVal is a reference to an array param; change the type
	// in addition to applying the replacement.
	if _, ok := arrayReplacements[trimedStringVal]; ok {
		paramValues.StringVal = ""
		paramValues.ArrayVal = substitution.ApplyArrayReplacements(stringVal, stringReplacements, arrayReplacements)
		paramValues.Type = ParamTypeArray
	}
	// Case 3: the stringVal is a reference to an object param; change the type
	// in addition to applying the replacement.
	// NOTE(review): the three cases are checked independently (no else-if); a
	// key present in several replacement maps would apply each branch in order.
	if _, ok := objectReplacements[trimedStringVal]; ok {
		paramValues.StringVal = ""
		paramValues.ObjectVal = objectReplacements[trimedStringVal]
		paramValues.Type = ParamTypeObject
	}
}
// NewStructuredValues creates a ParamValue of type ParamTypeString or
// ParamTypeArray, based on how many inputs are given (>1 input creates an
// array, not a string).
func NewStructuredValues(value string, values ...string) *ParamValue {
	if len(values) == 0 {
		return &ParamValue{
			Type:      ParamTypeString,
			StringVal: value,
		}
	}
	all := make([]string, 0, len(values)+1)
	all = append(all, value)
	all = append(all, values...)
	return &ParamValue{
		Type:     ParamTypeArray,
		ArrayVal: all,
	}
}
// NewArrayOrString is kept for backward compatibility.
//
// Deprecated: Use NewStructuredValues instead.
var NewArrayOrString = NewStructuredValues
// NewObject creates a ParamValue of type ParamTypeObject from the provided
// key-value pairs. The map is stored as-is (not copied).
func NewObject(pairs map[string]string) *ParamValue {
	v := ParamValue{
		Type:      ParamTypeObject,
		ObjectVal: pairs,
	}
	return &v
}
// ArrayReference returns the name of the parameter from an array parameter
// reference, e.g. "arrayParam" from "$(params.arrayParam[*])".
func ArrayReference(a string) string {
	withoutPrefix := strings.TrimPrefix(a, "$("+ParamsPrefix+".")
	return strings.TrimSuffix(withoutPrefix, "[*])")
}
// validatePipelineParametersVariablesInTaskParameters validates param values that
// may contain reference(s) to other params to make sure those references are used appropriately.
//
// Array elements are validated per index, object values per key, and string
// values via validateParamStringValue (which also allows isolated whole
// array/object references). Duplicate param names are rejected first.
func validatePipelineParametersVariablesInTaskParameters(params Params, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	errs = errs.Also(params.validateDuplicateParameters()).ViaField("params")
	for _, param := range params {
		switch param.Value.Type {
		case ParamTypeArray:
			for idx, arrayElement := range param.Value.ArrayVal {
				errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("params", param.Name))
			}
		case ParamTypeObject:
			for key, val := range param.Value.ObjectVal {
				errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldKey("properties", key).ViaFieldKey("params", param.Name))
			}
		case ParamTypeString:
			fallthrough
		default:
			// Untyped values are treated as strings.
			errs = errs.Also(validateParamStringValue(param, prefix, paramNames, arrayParamNames, objectParamNameKeys))
		}
	}
	return errs
}
// validateParamStringValue validates the param value field of string type
// that may contain references to other isolated array/object params other than string param.
//
// If the whole value is a single isolated reference ("$(params.foo[*])"),
// only the referenced name is checked; otherwise the value is validated as a
// normal string that may mix literals and variable references.
func validateParamStringValue(param Param, prefix string, paramNames sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	stringValue := param.Value.StringVal
	// If the provided param value is an isolated reference to a whole
	// array/object, we just check that the referenced param name exists.
	isIsolated, errs := substitution.ValidateWholeArrayOrObjectRefInStringVariable(param.Name, stringValue, prefix, paramNames)
	if isIsolated {
		return errs
	}
	// Otherwise the param value is a string literal and/or contains multiple variables.
	// valid example: "$(params.myString) and another $(params.myObject.key1)"
	// invalid example: "$(params.myString) and another $(params.myObject[*])"
	return validateStringVariable(stringValue, prefix, paramNames, arrayVars, objectParamNameKeys).ViaFieldKey("params", param.Name)
}
// validateStringVariable validates a plain string field that may only
// reference string params or individual keys of object params; references to
// whole array params are prohibited.
func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError {
	var errs *apis.FieldError
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars))
	errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys))
	errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(value, prefix, arrayVars))
	return errs
}
// validateArrayVariable validates a single array element: it may reference
// known string variables or object keys, and an array variable reference must
// appear isolated (not embedded inside a larger string).
func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError {
	var errs *apis.FieldError
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars))
	errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys))
	errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(value, prefix, arrayVars))
	return errs
}
// validateObjectVariable validates references to object params: each
// "$(prefix.objectName.key)" must use a declared key, and referencing an
// entire object ("$(prefix.objectName)") is prohibited.
func validateObjectVariable(value, prefix string, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	objectNames := sets.NewString()
	for objectParamName, keys := range objectParamNameKeys {
		objectNames.Insert(objectParamName)
		// The "\\." makes the object name part of the (regex-style) variable prefix.
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...)))
	}
	return errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(value, prefix, objectNames))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/version"
"knative.dev/pkg/apis"
)
var _ apis.Convertible = (*Pipeline)(nil)

// ConvertTo implements apis.Convertible.
//
// It up-converts a v1beta1 Pipeline into a v1 Pipeline, first stashing the
// deprecated Resources field into an object annotation (it has no v1
// counterpart) so it can be restored on down-conversion. Conversion is
// skipped for objects being deleted.
func (p *Pipeline) ConvertTo(ctx context.Context, to apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	switch sink := to.(type) {
	case *v1.Pipeline:
		sink.ObjectMeta = p.ObjectMeta
		// Preserve spec.resources in an annotation; v1 has no such field.
		if err := serializePipelineResources(&sink.ObjectMeta, &p.Spec); err != nil {
			return err
		}
		return p.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta)
	default:
		return fmt.Errorf("unknown version, got: %T", sink)
	}
}
// ConvertTo implements apis.Convertible.
//
// Field-by-field up-conversion of a v1beta1 PipelineSpec into a v1
// PipelineSpec. Each slice on the sink is reset to nil first so a reused sink
// never keeps stale entries. Only task conversion can fail (embedded task
// specs round-trip through metadata); the other conversions are infallible.
func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec, meta *metav1.ObjectMeta) error {
	sink.DisplayName = ps.DisplayName
	sink.Description = ps.Description
	sink.Tasks = nil
	for _, t := range ps.Tasks {
		new := v1.PipelineTask{}
		err := t.convertTo(ctx, &new, meta)
		if err != nil {
			return err
		}
		sink.Tasks = append(sink.Tasks, new)
	}
	sink.Params = nil
	for _, p := range ps.Params {
		new := v1.ParamSpec{}
		p.convertTo(ctx, &new)
		sink.Params = append(sink.Params, new)
	}
	sink.Workspaces = nil
	for _, w := range ps.Workspaces {
		new := v1.PipelineWorkspaceDeclaration{}
		w.convertTo(ctx, &new)
		sink.Workspaces = append(sink.Workspaces, new)
	}
	sink.Results = nil
	for _, r := range ps.Results {
		new := v1.PipelineResult{}
		r.convertTo(ctx, &new)
		sink.Results = append(sink.Results, new)
	}
	sink.Finally = nil
	for _, f := range ps.Finally {
		new := v1.PipelineTask{}
		err := f.convertTo(ctx, &new, meta)
		if err != nil {
			return err
		}
		sink.Finally = append(sink.Finally, new)
	}
	return nil
}
// ConvertFrom implements apis.Convertible.
//
// It down-converts a v1 Pipeline into this v1beta1 Pipeline, restoring the
// deprecated Resources field that ConvertTo stashed in an annotation.
func (p *Pipeline) ConvertFrom(ctx context.Context, from apis.Convertible) error {
	switch source := from.(type) {
	case *v1.Pipeline:
		p.ObjectMeta = source.ObjectMeta
		// Restore spec.resources from the annotation written during up-conversion.
		if err := deserializePipelineResources(&p.ObjectMeta, &p.Spec); err != nil {
			return err
		}
		return p.Spec.ConvertFrom(ctx, &source.Spec, &p.ObjectMeta)
	default:
		// FIX: report the type of the unrecognized source, not of the receiver:
		// %T on `p` always prints *v1beta1.Pipeline, hiding the actual problem.
		// This also matches ConvertTo, which reports the sink's type.
		return fmt.Errorf("unknown version, got: %T", source)
	}
}
// ConvertFrom implements apis.Convertible.
//
// Field-by-field down-conversion of a v1 PipelineSpec into this v1beta1
// PipelineSpec; the mirror image of PipelineSpec.ConvertTo. Slices are reset
// to nil first so stale entries on a reused receiver are dropped.
func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec, meta *metav1.ObjectMeta) error {
	ps.DisplayName = source.DisplayName
	ps.Description = source.Description
	ps.Tasks = nil
	for _, t := range source.Tasks {
		new := PipelineTask{}
		err := new.convertFrom(ctx, t, meta)
		if err != nil {
			return err
		}
		ps.Tasks = append(ps.Tasks, new)
	}
	ps.Params = nil
	for _, p := range source.Params {
		new := ParamSpec{}
		new.convertFrom(ctx, p)
		ps.Params = append(ps.Params, new)
	}
	ps.Workspaces = nil
	for _, w := range source.Workspaces {
		new := PipelineWorkspaceDeclaration{}
		new.convertFrom(ctx, w)
		ps.Workspaces = append(ps.Workspaces, new)
	}
	ps.Results = nil
	for _, r := range source.Results {
		new := PipelineResult{}
		new.convertFrom(ctx, r)
		ps.Results = append(ps.Results, new)
	}
	ps.Finally = nil
	for _, f := range source.Finally {
		new := PipelineTask{}
		err := new.convertFrom(ctx, f, meta)
		if err != nil {
			return err
		}
		ps.Finally = append(ps.Finally, new)
	}
	return nil
}
// convertTo up-converts a single v1beta1 PipelineTask into a v1 PipelineTask.
//
// Only the embedded TaskSpec conversion can fail; all other fields are direct
// copies or infallible conversions. Note that v1beta1's WhenExpressions maps
// to v1's When, and the deprecated Resources field is intentionally dropped.
func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask, meta *metav1.ObjectMeta) error {
	sink.Name = pt.Name
	sink.DisplayName = pt.DisplayName
	sink.Description = pt.Description
	if pt.TaskRef != nil {
		sink.TaskRef = &v1.TaskRef{}
		pt.TaskRef.convertTo(ctx, sink.TaskRef)
	}
	if pt.TaskSpec != nil {
		sink.TaskSpec = &v1.EmbeddedTask{}
		err := pt.TaskSpec.convertTo(ctx, sink.TaskSpec, meta, pt.Name)
		if err != nil {
			return err
		}
	}
	sink.When = nil
	for _, we := range pt.WhenExpressions {
		new := v1.WhenExpression{}
		we.convertTo(ctx, &new)
		sink.When = append(sink.When, new)
	}
	sink.OnError = (v1.PipelineTaskOnErrorType)(pt.OnError)
	sink.Retries = pt.Retries
	sink.RunAfter = pt.RunAfter
	sink.Params = nil
	for _, p := range pt.Params {
		new := v1.Param{}
		p.convertTo(ctx, &new)
		sink.Params = append(sink.Params, new)
	}
	sink.Matrix = nil
	if pt.IsMatrixed() {
		new := v1.Matrix{}
		pt.Matrix.convertTo(ctx, &new)
		sink.Matrix = &new
	}
	sink.Workspaces = nil
	for _, w := range pt.Workspaces {
		new := v1.WorkspacePipelineTaskBinding{}
		w.convertTo(ctx, &new)
		sink.Workspaces = append(sink.Workspaces, new)
	}
	sink.Timeout = pt.Timeout
	return nil
}
// convertFrom down-converts a single v1 PipelineTask into this v1beta1
// PipelineTask; the mirror image of convertTo. Only the embedded TaskSpec
// conversion can fail.
func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask, meta *metav1.ObjectMeta) error {
	pt.Name = source.Name
	pt.DisplayName = source.DisplayName
	pt.Description = source.Description
	if source.TaskRef != nil {
		newTaskRef := TaskRef{}
		newTaskRef.ConvertFrom(ctx, *source.TaskRef)
		pt.TaskRef = &newTaskRef
	}
	if source.TaskSpec != nil {
		newTaskSpec := EmbeddedTask{}
		err := newTaskSpec.convertFrom(ctx, *source.TaskSpec, meta, pt.Name)
		// The (possibly partial) spec is assigned even on error; the error
		// still aborts the overall conversion below.
		pt.TaskSpec = &newTaskSpec
		if err != nil {
			return err
		}
	}
	pt.WhenExpressions = nil
	for _, we := range source.When {
		new := WhenExpression{}
		new.convertFrom(ctx, we)
		pt.WhenExpressions = append(pt.WhenExpressions, new)
	}
	pt.OnError = (PipelineTaskOnErrorType)(source.OnError)
	pt.Retries = source.Retries
	pt.RunAfter = source.RunAfter
	pt.Params = nil
	for _, p := range source.Params {
		new := Param{}
		new.ConvertFrom(ctx, p)
		pt.Params = append(pt.Params, new)
	}
	pt.Matrix = nil
	if source.IsMatrixed() {
		new := Matrix{}
		new.convertFrom(ctx, *source.Matrix)
		pt.Matrix = &new
	}
	pt.Workspaces = nil
	for _, w := range source.Workspaces {
		new := WorkspacePipelineTaskBinding{}
		new.convertFrom(ctx, w)
		pt.Workspaces = append(pt.Workspaces, new)
	}
	pt.Timeout = source.Timeout
	return nil
}
// convertTo up-converts an embedded (inline) task: the custom-task fields
// (TypeMeta/Spec/Metadata) are copied directly, and the regular TaskSpec is
// delegated to TaskSpec.ConvertTo, which is the only step that can fail.
func (et EmbeddedTask) convertTo(ctx context.Context, sink *v1.EmbeddedTask, meta *metav1.ObjectMeta, taskName string) error {
	sink.TypeMeta = et.TypeMeta
	sink.Spec = et.Spec
	sink.Metadata = v1.PipelineTaskMetadata(et.Metadata)
	sink.TaskSpec = v1.TaskSpec{}
	if err := et.TaskSpec.ConvertTo(ctx, &sink.TaskSpec, meta, taskName); err != nil {
		return err
	}
	return nil
}
// convertFrom down-converts a v1 embedded (inline) task into this v1beta1
// EmbeddedTask; the mirror image of convertTo.
func (et *EmbeddedTask) convertFrom(ctx context.Context, source v1.EmbeddedTask, meta *metav1.ObjectMeta, taskName string) error {
	et.TypeMeta = source.TypeMeta
	et.Spec = source.Spec
	et.Metadata = PipelineTaskMetadata(source.Metadata)
	et.TaskSpec = TaskSpec{}
	if err := et.TaskSpec.ConvertFrom(ctx, &source.TaskSpec, meta, taskName); err != nil {
		return err
	}
	return nil
}
// convertTo copies this v1beta1 WhenExpression into the v1 sink field by
// field; the two types share an identical shape, so no transformation is
// needed.
func (we WhenExpression) convertTo(ctx context.Context, sink *v1.WhenExpression) {
	sink.CEL = we.CEL
	sink.Input = we.Input
	sink.Operator = we.Operator
	sink.Values = we.Values
}
// convertFrom copies a v1 WhenExpression into this v1beta1 receiver field by
// field; the mirror image of convertTo.
func (we *WhenExpression) convertFrom(ctx context.Context, source v1.WhenExpression) {
	we.CEL = source.CEL
	we.Input = source.Input
	we.Operator = source.Operator
	we.Values = source.Values
}
// convertTo up-converts a v1beta1 Matrix (fan-out params and include
// combinations) into a v1 Matrix. Params and each include's params are
// converted element by element; include names are copied as-is.
func (m *Matrix) convertTo(ctx context.Context, sink *v1.Matrix) {
	for _, param := range m.Params {
		new := v1.Param{}
		param.convertTo(ctx, &new)
		sink.Params = append(sink.Params, new)
	}
	for i, include := range m.Include {
		// Append the include entry first, then fill in its params in place.
		sink.Include = append(sink.Include, v1.IncludeParams{Name: include.Name})
		for _, param := range include.Params {
			newIncludeParam := v1.Param{}
			param.convertTo(ctx, &newIncludeParam)
			sink.Include[i].Params = append(sink.Include[i].Params, newIncludeParam)
		}
	}
}
// convertFrom down-converts a v1 Matrix into this v1beta1 Matrix; the mirror
// image of convertTo.
func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) {
	for _, param := range source.Params {
		new := Param{}
		new.ConvertFrom(ctx, param)
		m.Params = append(m.Params, new)
	}
	for i, include := range source.Include {
		// Append the include entry first, then fill in its params in place.
		m.Include = append(m.Include, IncludeParams{Name: include.Name})
		for _, p := range include.Params {
			new := Param{}
			new.ConvertFrom(ctx, p)
			m.Include[i].Params = append(m.Include[i].Params, new)
		}
	}
}
// convertTo up-converts a v1beta1 PipelineResult into a v1 PipelineResult,
// converting the Value through ParamValue's own converter.
func (pr PipelineResult) convertTo(ctx context.Context, sink *v1.PipelineResult) {
	sink.Name = pr.Name
	sink.Type = v1.ResultsType(pr.Type)
	sink.Description = pr.Description
	var converted v1.ParamValue
	pr.Value.convertTo(ctx, &converted)
	sink.Value = converted
}
// convertFrom down-converts a v1 PipelineResult into this v1beta1
// PipelineResult; the mirror image of convertTo.
func (pr *PipelineResult) convertFrom(ctx context.Context, source v1.PipelineResult) {
	pr.Name = source.Name
	pr.Type = ResultsType(source.Type)
	pr.Description = source.Description
	var converted ParamValue
	converted.convertFrom(ctx, source.Value)
	pr.Value = converted
}
// convertTo copies the embedded-task labels and annotations into the v1 sink.
// The maps are shared, not cloned.
func (ptm PipelineTaskMetadata) convertTo(ctx context.Context, sink *v1.PipelineTaskMetadata) {
	sink.Annotations = ptm.Annotations
	sink.Labels = ptm.Labels
}
// convertFrom copies the embedded-task labels and annotations from the v1
// source into this v1beta1 receiver. The maps are shared, not cloned.
func (ptm *PipelineTaskMetadata) convertFrom(ctx context.Context, source v1.PipelineTaskMetadata) {
	ptm.Labels = source.Labels
	// FIX: this previously read `ptm.Annotations = source.Labels`, which
	// silently replaced all annotations with the labels map on every v1 ->
	// v1beta1 down-conversion (convertTo copies Annotations correctly).
	ptm.Annotations = source.Annotations
}
// serializePipelineResources stashes the deprecated spec.resources field into
// an object annotation so it survives the round trip through v1 (which has no
// resources field). A nil Resources slice writes nothing.
func serializePipelineResources(meta *metav1.ObjectMeta, spec *PipelineSpec) error {
	if spec.Resources != nil {
		return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey)
	}
	return nil
}
// deserializePipelineResources restores the deprecated spec.resources field
// from the annotation written by serializePipelineResources. spec.Resources
// is only assigned when the annotation decodes to a non-empty list.
func deserializePipelineResources(meta *metav1.ObjectMeta, spec *PipelineSpec) error {
	var resources []PipelineDeclaredResource
	if err := version.DeserializeFromMetadata(meta, &resources, resourcesAnnotationKey); err != nil {
		return err
	}
	if len(resources) > 0 {
		spec.Resources = resources
	}
	return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"knative.dev/pkg/apis"
)
var _ apis.Defaultable = (*Pipeline)(nil)

// SetDefaults sets default values on the Pipeline's Spec.
func (p *Pipeline) SetDefaults(ctx context.Context) {
	p.Spec.SetDefaults(ctx)
}
// SetDefaults sets default values for the PipelineSpec's Params, Tasks, and Finally.
//
// NOTE(review): the Tasks/Finally loops range by value, so pt/ft are copies;
// defaulting only reaches the original slice elements through pointer fields
// (TaskRef/TaskSpec are pointers — see PipelineTask.SetDefaults). Confirm any
// future defaulting of non-pointer fields uses an index-based loop instead.
func (ps *PipelineSpec) SetDefaults(ctx context.Context) {
	for i := range ps.Params {
		ps.Params[i].SetDefaults(ctx)
	}
	for _, pt := range ps.Tasks {
		pt.SetDefaults(ctx)
	}
	for _, ft := range ps.Finally {
		ctx := ctx // Ensure local scoping per Task
		ft.SetDefaults(ctx)
	}
}
// SetDefaults sets default values for a PipelineTask.
//
// When a TaskRef has neither a name nor a resolver, the cluster-configured
// default resolver is applied; when it has neither a kind nor a resolver, the
// kind defaults to a namespaced Task. An inline TaskSpec is defaulted
// recursively.
func (pt *PipelineTask) SetDefaults(ctx context.Context) {
	cfg := config.FromContextOrDefaults(ctx)
	if pt.TaskRef != nil {
		if pt.TaskRef.Name == "" && pt.TaskRef.Resolver == "" {
			pt.TaskRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType)
		}
		// Kind is deliberately left empty for resolver-based refs (including
		// one defaulted just above): the resolver determines the kind.
		if pt.TaskRef.Kind == "" && pt.TaskRef.Resolver == "" {
			pt.TaskRef.Kind = NamespacedTaskKind
		}
	}
	if pt.TaskSpec != nil {
		pt.TaskSpec.SetDefaults(ctx)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/kmeta"
)
// PipelineTaskOnErrorType defines a list of supported failure handling behaviors of a PipelineTask on error.
type PipelineTaskOnErrorType string

const (
	// PipelineTasksAggregateStatus is a param representing the aggregate status of all dag pipelineTasks.
	PipelineTasksAggregateStatus = "tasks.status"
	// PipelineTasks is a value representing that a task is a member of the "tasks" section of the pipeline.
	PipelineTasks = "tasks"
	// PipelineFinallyTasks is a value representing that a task is a member of the "finally" section of the pipeline.
	PipelineFinallyTasks = "finally"
	// PipelineTaskStopAndFail indicates to stop and fail the PipelineRun if the PipelineTask fails.
	PipelineTaskStopAndFail PipelineTaskOnErrorType = "stopAndFail"
	// PipelineTaskContinue indicates to continue executing the rest of the DAG when the PipelineTask fails.
	PipelineTaskContinue PipelineTaskOnErrorType = "continue"
)
// +genclient
// +genclient:noStatus
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true

// Pipeline describes a list of Tasks to execute. It expresses how outputs
// of tasks feed into inputs of subsequent tasks.
//
// Deprecated: Please use v1.Pipeline instead.
type Pipeline struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec holds the desired state of the Pipeline from the client
	// +optional
	Spec PipelineSpec `json:"spec"`
}
var _ kmeta.OwnerRefable = (*Pipeline)(nil)

// PipelineMetadata returns the Pipeline's ObjectMeta, implementing PipelineObject.
func (p *Pipeline) PipelineMetadata() metav1.ObjectMeta {
	return p.ObjectMeta
}
// PipelineSpec returns a copy of the Pipeline's Spec, implementing PipelineObject.
func (p *Pipeline) PipelineSpec() PipelineSpec {
	return p.Spec
}
// Copy returns a deep copy of the Pipeline, implementing PipelineObject.
func (p *Pipeline) Copy() PipelineObject {
	return p.DeepCopy()
}
// GetGroupVersionKind implements kmeta.OwnerRefable, so a Pipeline can be the
// owner reference of the objects it creates.
func (*Pipeline) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind(pipeline.PipelineControllerName)
}
// Checksum computes the sha256 checksum of the pipeline object.
//
// Before hashing, the object metadata is normalized by
// checksum.PrepareObjectMeta, which strips system-provided annotations: only
// the name, namespace, generateName, user-provided labels/annotations and the
// pipeline spec contribute to the checksum. TypeMeta is pinned explicitly so
// the hash is stable regardless of how the object was decoded.
func (p *Pipeline) Checksum() ([]byte, error) {
	preprocessed := Pipeline{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "tekton.dev/v1beta1",
			Kind:       "Pipeline",
		},
		ObjectMeta: checksum.PrepareObjectMeta(p),
		Spec:       p.Spec,
	}
	return checksum.ComputeSha256Checksum(preprocessed)
}
// PipelineSpec defines the desired state of Pipeline.
type PipelineSpec struct {
	// DisplayName is a user-facing name of the pipeline that may be
	// used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`
	// Description is a user-facing description of the pipeline that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`
	// Resources is validated to be empty by the webhook; it is only kept so
	// that old stored objects still deserialize.
	// Deprecated: Unused, preserved only for backwards compatibility
	// +listType=atomic
	Resources []PipelineDeclaredResource `json:"resources,omitempty"`
	// Tasks declares the graph of Tasks that execute when this Pipeline is run.
	// +listType=atomic
	Tasks []PipelineTask `json:"tasks,omitempty"`
	// Params declares a list of input parameters that must be supplied when
	// this Pipeline is run.
	Params ParamSpecs `json:"params,omitempty"`
	// Workspaces declares a set of named workspaces that are expected to be
	// provided by a PipelineRun.
	// +optional
	// +listType=atomic
	Workspaces []PipelineWorkspaceDeclaration `json:"workspaces,omitempty"`
	// Results are values that this pipeline can output once run
	// +optional
	// +listType=atomic
	Results []PipelineResult `json:"results,omitempty"`
	// Finally declares the list of Tasks that execute just before leaving the Pipeline
	// i.e. either after all Tasks are finished executing successfully
	// or after a failure which would result in ending the Pipeline
	// +listType=atomic
	Finally []PipelineTask `json:"finally,omitempty"`
}
// PipelineResult is used to describe the results of a pipeline.
type PipelineResult struct {
	// Name the given name
	Name string `json:"name"`

	// Type is the user-specified type of the result.
	// The possible types are 'string', 'array', and 'object', with 'string' as the default.
	// 'array' and 'object' types are alpha features.
	Type ResultsType `json:"type,omitempty"`

	// Description is a human-readable description of the result
	// +optional
	Description string `json:"description"`

	// Value the expression used to retrieve the value
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ResultValue `json:"value"`
}
// PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask.
type PipelineTaskMetadata struct {
	// Labels to attach to the TaskRun created for this embedded task.
	// +optional
	Labels map[string]string `json:"labels,omitempty"`

	// Annotations to attach to the TaskRun created for this embedded task.
	// +optional
	Annotations map[string]string `json:"annotations,omitempty"`
}
// EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.
// It either embeds a regular TaskSpec or, for Custom Tasks, carries a
// TypeMeta plus a raw Spec (see IsCustomTask).
type EmbeddedTask struct {
	// +optional
	runtime.TypeMeta `json:",inline,omitempty"`

	// Spec is a specification of a custom task
	// +optional
	Spec runtime.RawExtension `json:"spec,omitempty"`

	// +optional
	Metadata PipelineTaskMetadata `json:"metadata,omitempty"`

	// TaskSpec is a specification of a task
	// +optional
	TaskSpec `json:",inline,omitempty"`
}
// PipelineTask defines a task in a Pipeline, passing inputs from both
// Params and from the output of previous tasks.
type PipelineTask struct {
	// Name is the name of this task within the context of a Pipeline. Name is
	// used as a coordinate with the `from` and `runAfter` fields to establish
	// the execution order of tasks relative to one another.
	Name string `json:"name,omitempty"`

	// DisplayName is the display name of this task within the context of a Pipeline.
	// This display name may be used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`

	// Description is the description of this task within the context of a Pipeline.
	// This description may be used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`

	// TaskRef is a reference to a task definition.
	// +optional
	TaskRef *TaskRef `json:"taskRef,omitempty"`

	// TaskSpec is a specification of a task
	// Specifying TaskSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Task.spec (API version: tekton.dev/v1beta1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	TaskSpec *EmbeddedTask `json:"taskSpec,omitempty"`

	// WhenExpressions is a list of when expressions that need to be true for the task to run
	// +optional
	WhenExpressions WhenExpressions `json:"when,omitempty"`

	// Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False
	// +optional
	Retries int `json:"retries,omitempty"`

	// RunAfter is the list of PipelineTask names that should be executed before
	// this Task executes. (Used to force a specific ordering in graph execution.)
	// +optional
	// +listType=atomic
	RunAfter []string `json:"runAfter,omitempty"`

	// Deprecated: Unused, preserved only for backwards compatibility
	// +optional
	Resources *PipelineTaskResources `json:"resources,omitempty"`

	// Parameters declares parameters passed to this task.
	// +optional
	Params Params `json:"params,omitempty"`

	// Matrix declares parameters used to fan out this task.
	// +optional
	Matrix *Matrix `json:"matrix,omitempty"`

	// Workspaces maps workspaces from the pipeline spec to the workspaces
	// declared in the Task.
	// +optional
	// +listType=atomic
	Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"`

	// Duration after which the TaskRun times out. Defaults to 1 hour.
	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`

	// PipelineRef is a reference to a pipeline definition
	// Note: PipelineRef is in preview mode and not yet supported
	// +optional
	PipelineRef *PipelineRef `json:"pipelineRef,omitempty"`

	// PipelineSpec is a specification of a pipeline
	// Note: PipelineSpec is in preview mode and not yet supported
	// Specifying PipelineSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Pipeline.spec (API version: tekton.dev/v1beta1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`

	// OnError defines the exiting behavior of a PipelineRun on error;
	// can be set to [ continue | stopAndFail ]
	// (see PipelineTaskContinue / PipelineTaskStopAndFail).
	// +optional
	OnError PipelineTaskOnErrorType `json:"onError,omitempty"`
}
// IsCustomTask checks whether an embedded TaskSpec is a Custom Task.
// Note that even if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind`
// is set to `"Task"`, the reference is still considered a Custom Task —
// https://github.com/tektoncd/pipeline/issues/6457
func (et *EmbeddedTask) IsCustomTask() bool {
	if et == nil {
		return false
	}
	return et.APIVersion != "" && et.Kind != ""
}
// IsMatrixed returns whether this pipeline task fans out via a matrix, i.e.
// whether its Matrix declares any params or include combinations.
func (pt *PipelineTask) IsMatrixed() bool {
	if pt.Matrix.HasParams() {
		return true
	}
	return pt.Matrix.HasInclude()
}
// TaskSpecMetadata returns the metadata of the PipelineTask's EmbeddedTask spec.
// NOTE(review): panics if TaskSpec is nil — callers appear expected to check
// for an embedded spec first; confirm at call sites.
func (pt *PipelineTask) TaskSpecMetadata() PipelineTaskMetadata {
	return pt.TaskSpec.Metadata
}
// HashKey is the name of the PipelineTask, and is used as the key for this PipelineTask in the DAG.
func (pt PipelineTask) HashKey() string {
	return pt.Name
}
// Deps returns all other PipelineTask names this PipelineTask depends on,
// combining resource dependencies (result references) and ordering
// dependencies (runAfter). The result is deduplicated and sorted.
func (pt PipelineTask) Deps() []string {
	// A set both deduplicates and (via List) sorts the dependency names.
	deps := sets.NewString()
	// Resource dependencies: every task whose result this task consumes.
	for _, ref := range PipelineTaskResultRefs(&pt) {
		deps.Insert(ref.PipelineTask)
	}
	// Ordering dependencies: explicit runAfter entries.
	deps.Insert(pt.RunAfter...)
	return deps.List()
}
// PipelineTaskList is a list of PipelineTasks
type PipelineTaskList []PipelineTask

// Deps returns a map keyed by pipelineTask name whose value is that task's
// list of dependencies; tasks without dependencies are omitted from the map.
func (l PipelineTaskList) Deps() map[string][]string {
	all := make(map[string][]string, len(l))
	for _, task := range l {
		if d := task.Deps(); len(d) > 0 {
			all[task.HashKey()] = d
		}
	}
	return all
}
// Items returns all tasks in the PipelineTaskList converted to dag.Tasks.
func (l PipelineTaskList) Items() []dag.Task {
	tasks := make([]dag.Task, 0, len(l))
	for i := range l {
		tasks = append(tasks, dag.Task(l[i]))
	}
	return tasks
}
// Names returns the set of pipeline task names in this list.
func (l PipelineTaskList) Names() sets.String {
	names := sets.NewString()
	for i := range l {
		names.Insert(l[i].Name)
	}
	return names
}
// PipelineTaskParam is used to provide arbitrary string parameters to a Task.
type PipelineTaskParam struct {
	// Name of the parameter.
	Name string `json:"name"`
	// Value of the parameter, always a plain string.
	Value string `json:"value"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PipelineList contains a list of Pipeline.
type PipelineList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []Pipeline `json:"items"`
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"strings"
"github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
"github.com/tektoncd/pipeline/pkg/substitution"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
var (
	// Compile-time assertions that Pipeline satisfies the interfaces used by
	// knative's admission webhook machinery (validation + verb filtering).
	_ apis.Validatable              = (*Pipeline)(nil)
	_ resourcesemantics.VerbLimited = (*Pipeline)(nil)
)
const (
	// Field names used in validation error messages for the mutually
	// exclusive ref/spec fields that a PipelineTask may set.
	taskRef      = "taskRef"
	taskSpec     = "taskSpec"
	pipelineRef  = "pipelineRef"
	pipelineSpec = "pipelineSpec"
)
// SupportedVerbs returns the operations that validation should be called for
func (p *Pipeline) SupportedVerbs() []admissionregistrationv1.OperationType {
	// Validation runs on create and update; deletes are not validated.
	verbs := []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
	return verbs
}
// Validate checks that the Pipeline structure is valid but does not validate
// that any references resources exist, that is done at run time.
func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError {
	// Standard object metadata checks (name length, generateName, etc.).
	errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata")
	errs = errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
	// When a Pipeline is created directly, instead of declared inline in a PipelineRun,
	// we do not support propagated parameters and workspaces.
	// Validate that all params and workspaces it uses are declared.
	errs = errs.Also(p.Spec.validatePipelineParameterUsage(ctx).ViaField("spec"))
	return errs.Also(p.Spec.validatePipelineWorkspacesUsage().ViaField("spec"))
}
// Validate checks that taskNames in the Pipeline are valid and that the graph
// of Tasks expressed in the Pipeline makes sense.
func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Beta-gated fields are rejected unless enable-api-fields permits them.
	errs = errs.Also(ps.ValidateBetaFields(ctx))
	// A completely empty spec is invalid.
	if equality.Semantic.DeepEqual(ps, &PipelineSpec{}) {
		errs = errs.Also(apis.ErrGeneric("expected at least one, got none", "description", "params", "resources", "tasks", "workspaces"))
	}
	// PipelineTask must have a valid unique label and at least one of taskRef or taskSpec should be specified
	errs = errs.Also(ValidatePipelineTasks(ctx, ps.Tasks, ps.Finally))
	// The deprecated resources field is no longer supported.
	if len(ps.Resources) > 0 {
		errs = errs.Also(apis.ErrDisallowedFields("resources"))
	}
	// Validate the pipeline task graph
	errs = errs.Also(validateGraph(ps.Tasks))
	// The parameter variables should be valid
	errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Tasks, ps.Params).ViaField("tasks"))
	errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Finally, ps.Params).ViaField("finally"))
	// Context variable references ($(context.pipelineRun.*) etc.) must be recognized names.
	errs = errs.Also(validatePipelineContextVariables(ps.Tasks).ViaField("tasks"))
	errs = errs.Also(validatePipelineContextVariables(ps.Finally).ViaField("finally"))
	// Execution-status references are only legal from finally tasks.
	errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally))
	// Validate the pipeline's workspaces.
	errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces))
	// Validate the pipeline's results
	errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks, ps.Finally))
	// finally requires at least one ordinary (dag) task to exist.
	errs = errs.Also(validateTasksAndFinallySection(ps))
	errs = errs.Also(validateFinalTasks(ps.Tasks, ps.Finally))
	errs = errs.Also(validateWhenExpressions(ctx, ps.Tasks, ps.Finally))
	errs = errs.Also(validateArtifactReference(ctx, ps.Tasks, ps.Finally))
	errs = errs.Also(validateMatrix(ctx, ps.Tasks).ViaField("tasks"))
	errs = errs.Also(validateMatrix(ctx, ps.Finally).ViaField("finally"))
	return errs
}
// ValidateBetaFields returns an error if the PipelineSpec uses beta specifications governed by
// `enable-api-fields` but does not have "enable-api-fields" set to "alpha" or "beta".
func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	// Both the dag (`tasks`) and `finally` sections are checked identically;
	// only the field name in the error path differs.
	sections := []struct {
		field string
		list  []PipelineTask
	}{
		{"tasks", ps.Tasks},
		{"finally", ps.Finally},
	}
	for _, section := range sections {
		for i := range section.list {
			errs = errs.Also(section.list[i].validateBetaFields(ctx).ViaFieldIndex(section.field, i))
		}
	}
	return errs
}
// validateBetaFields returns an error if the PipelineTask uses beta features but does not
// have "enable-api-fields" set to "alpha" or "beta".
func (pt *PipelineTask) validateBetaFields(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	// Only taskRef carries beta-gated sub-fields here.
	if pt.TaskRef == nil {
		return errs
	}
	// Remote resolution via a resolver is a beta feature.
	if pt.TaskRef.Resolver != "" {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "taskref.resolver", config.BetaAPIFields))
	}
	// Resolver params are likewise gated at beta.
	if len(pt.TaskRef.Params) > 0 {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "taskref.params", config.BetaAPIFields))
	}
	return errs
}
// ValidatePipelineTasks ensures that pipeline tasks has unique label, pipeline tasks has specified one of
// taskRef or taskSpec, and in case of a pipeline task with taskRef, it has a reference to a valid task (task name)
func ValidatePipelineTasks(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError {
	// The same name set is threaded through both sections so that a name used
	// in `tasks` cannot be reused in `finally`.
	seen := sets.NewString()
	errs := PipelineTaskList(tasks).Validate(ctx, seen, "tasks")
	return errs.Also(PipelineTaskList(finalTasks).Validate(ctx, seen, "finally"))
}
// Validate a list of pipeline tasks including custom task and bundles
func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, path string) (errs *apis.FieldError) {
	for i, t := range l {
		// Every pipeline task needs a valid DNS-label name.
		errs = errs.Also(t.ValidateName().ViaFieldIndex(path, i))
		// Names must be unique across the whole pipeline (taskNames is shared
		// between the `tasks` and `finally` sections by the caller).
		if taskNames.Has(t.Name) {
			errs = errs.Also(apis.ErrMultipleOneOf("name").ViaFieldIndex(path, i))
		}
		taskNames.Insert(t.Name)
		// Delegate ref/spec/custom-task specific validation to the task itself.
		errs = errs.Also(t.Validate(ctx).ViaFieldIndex(path, i))
	}
	return errs
}
// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task.
func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, additionalParams []ParamSpec, path string) (errs *apis.FieldError) {
	for i, t := range l {
		// Only embedded task specs have steps to check.
		if t.TaskSpec == nil {
			continue
		}
		// Params usable by steps: those declared on the embedded task plus the
		// pipeline-level declarations passed in by the caller.
		declared := append(t.TaskSpec.Params, additionalParams...)
		errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, declared).ViaFieldIndex(path, i))
	}
	return errs
}
// ValidateName checks whether the PipelineTask's name is a valid DNS label
func (pt PipelineTask) ValidateName() *apis.FieldError {
	// IsDNS1123Label returns a list of violation messages; empty means valid.
	if violations := validation.IsDNS1123Label(pt.Name); len(violations) == 0 {
		return nil
	}
	return &apis.FieldError{
		Message: fmt.Sprintf("invalid value %q", pt.Name),
		Paths:   []string{"name"},
		Details: "Pipeline Task name must be a valid DNS Label." +
			"For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
	}
}
// Validate classifies whether a task is a custom task, bundle, or a regular task(dag/final)
// calls the validation routine based on the type of the task
func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Exactly one of taskRef/taskSpec/pipelineRef/pipelineSpec must be set.
	errs = errs.Also(pt.validateRefOrSpec(ctx))
	// Inline specs may be disabled via the disable-inline-spec feature flag.
	errs = errs.Also(pt.validateEnabledInlineSpec(ctx))
	errs = errs.Also(pt.validateEmbeddedOrType())
	// The deprecated resources field is no longer supported.
	if pt.Resources != nil {
		errs = errs.Also(apis.ErrDisallowedFields("resources"))
	}
	// taskKinds contains the kinds when the apiVersion is not set, they are not custom tasks,
	// if apiVersion is set they are custom tasks.
	taskKinds := map[TaskKind]bool{
		"":                 true,
		NamespacedTaskKind: true,
	}
	errs = errs.Also(pt.ValidateOnError(ctx))
	// Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task
	switch {
	case pt.TaskRef != nil && !taskKinds[pt.TaskRef.Kind]:
		errs = errs.Also(pt.validateCustomTask())
	case pt.TaskRef != nil && pt.TaskRef.APIVersion != "":
		errs = errs.Also(pt.validateCustomTask())
	case pt.TaskSpec != nil && !taskKinds[TaskKind(pt.TaskSpec.Kind)]:
		errs = errs.Also(pt.validateCustomTask())
	case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "":
		errs = errs.Also(pt.validateCustomTask())
	default:
		errs = errs.Also(pt.validateTask(ctx))
	}
	// Explicit return instead of a naked return; this also drops the need for
	// the //nolint:nakedret suppression the original carried.
	return errs
}
// ValidateOnError validates the OnError field of a PipelineTask
func (pt PipelineTask) ValidateOnError(ctx context.Context) (errs *apis.FieldError) {
	// A value that is itself a $(params...) reference is resolved at run time
	// and cannot be validated here.
	if pt.OnError != "" && !isParamRefs(string(pt.OnError)) {
		// OnError is gated behind the beta API surface.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "OnError", config.BetaAPIFields))
		if pt.OnError != PipelineTaskContinue && pt.OnError != PipelineTaskStopAndFail {
			errs = errs.Also(apis.ErrInvalidValue(pt.OnError, "OnError", "PipelineTask OnError must be either \"continue\" or \"stopAndFail\""))
		}
		// "continue" and retries are mutually exclusive.
		if pt.OnError == PipelineTaskContinue && pt.Retries > 0 {
			errs = errs.Also(apis.ErrGeneric("PipelineTask OnError cannot be set to \"continue\" when Retries is greater than 0"))
		}
	}
	return errs
}
// validateEnabledInlineSpec validates that pipelineSpec or taskSpec is allowed by checking
// disable-inline-spec field
func (pt PipelineTask) validateEnabledInlineSpec(ctx context.Context) (errs *apis.FieldError) {
	// Parse the comma-separated disable-inline-spec flag once; the original
	// recomputed the identical split+lookup for each of the two fields.
	inlineDisabled := slices.Contains(strings.Split(
		config.FromContextOrDefaults(ctx).FeatureFlags.DisableInlineSpec, ","), "pipeline")
	if !inlineDisabled {
		return nil
	}
	// With "pipeline" in the disable list, neither kind of inline spec is allowed.
	if pt.TaskSpec != nil {
		errs = errs.Also(apis.ErrDisallowedFields("taskSpec"))
	}
	if pt.PipelineSpec != nil {
		errs = errs.Also(apis.ErrDisallowedFields("pipelineSpec"))
	}
	return errs
}
// validateMatrix validates this task's matrix section: feature-gate, fan-out
// size, uniqueness of matrix params, and exclusivity with regular params.
func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) {
	if pt.IsMatrixed() {
		// This is a beta feature and will fail validation if it's used in a pipeline spec
		// when the enable-api-fields feature gate is set to "stable".
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "matrix", config.BetaAPIFields))
		// The number of generated combinations is capped by configuration.
		errs = errs.Also(pt.Matrix.validateCombinationsCount(ctx))
		errs = errs.Also(pt.Matrix.validateUniqueParams())
	}
	// A parameter may be declared in matrix or params, but not both.
	errs = errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params))
	return errs
}
// validateEmbeddedOrType rejects cases where APIVersion and/or Kind are specified
// alongside an embedded Task. An embedded Task is identified by a non-empty
// TaskSpec.TaskSpec.Steps list.
func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) {
	// Nothing to check unless there is an embedded Task with steps.
	if pt.TaskSpec == nil || len(pt.TaskSpec.TaskSpec.Steps) == 0 {
		return nil
	}
	if pt.TaskSpec.APIVersion != "" {
		errs = errs.Also(&apis.FieldError{
			Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps",
			Paths:   []string{"taskSpec.apiVersion"},
		})
	}
	if pt.TaskSpec.Kind != "" {
		errs = errs.Also(&apis.FieldError{
			Message: "taskSpec.kind cannot be specified when using taskSpec.steps",
			Paths:   []string{"taskSpec.kind"},
		})
	}
	return errs
}
// validateWorkspaces checks this task's workspace bindings against the set of
// workspace names declared by the pipeline: each binding name must be unique
// within the task and must (directly or via .workspace) refer to a declared
// pipeline workspace.
func (pt *PipelineTask) validateWorkspaces(workspaceNames sets.String) (errs *apis.FieldError) {
	workspaceBindingNames := sets.NewString()
	for i, ws := range pt.Workspaces {
		// A task may not bind the same workspace name twice.
		if workspaceBindingNames.Has(ws.Name) {
			errs = errs.Also(apis.ErrGeneric(
				fmt.Sprintf("workspace name %q must be unique", ws.Name), "").ViaFieldIndex("workspaces", i))
		}
		if ws.Workspace == "" {
			// No explicit mapping: the binding's own name must match a declared
			// pipeline workspace.
			if !workspaceNames.Has(ws.Name) {
				errs = errs.Also(apis.ErrInvalidValue(
					fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Name),
					"",
				).ViaFieldIndex("workspaces", i))
			}
		} else if !workspaceNames.Has(ws.Workspace) {
			// Explicit mapping: the referenced pipeline workspace must exist.
			errs = errs.Also(apis.ErrInvalidValue(
				fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace),
				"",
			).ViaFieldIndex("workspaces", i))
		}
		workspaceBindingNames.Insert(ws.Name)
	}
	return errs
}
// validateRefOrSpec validates at least one of taskRef or taskSpec or pipelineRef or pipelineSpec is specified
func (pt PipelineTask) validateRefOrSpec(ctx context.Context) (errs *apis.FieldError) {
	// collect all the specified specifications
	nonNilFields := []string{}
	if pt.TaskRef != nil {
		nonNilFields = append(nonNilFields, taskRef)
	}
	if pt.TaskSpec != nil {
		nonNilFields = append(nonNilFields, taskSpec)
	}
	// pipelineRef/pipelineSpec (pipelines-in-pipelines) are alpha-gated.
	if pt.PipelineRef != nil {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, pipelineRef, config.AlphaAPIFields))
		nonNilFields = append(nonNilFields, pipelineRef)
	}
	if pt.PipelineSpec != nil {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, pipelineSpec, config.AlphaAPIFields))
		nonNilFields = append(nonNilFields, pipelineSpec)
	}
	// check the length of nonNilFields
	// if one of taskRef or taskSpec or pipelineRef or pipelineSpec is specified,
	// the length of nonNilFields should exactly be 1
	if len(nonNilFields) > 1 {
		errs = errs.Also(apis.ErrGeneric("expected exactly one, got multiple", nonNilFields...))
	} else if len(nonNilFields) == 0 {
		cfg := config.FromContextOrDefaults(ctx)
		// The "missing" error only mentions the fields the current feature-gate
		// level actually allows, to avoid suggesting unavailable options.
		// check for TaskRef or TaskSpec or PipelineRef or PipelineSpec with alpha feature flag
		if cfg.FeatureFlags.EnableAPIFields == config.AlphaAPIFields {
			errs = errs.Also(apis.ErrMissingOneOf(taskRef, taskSpec, pipelineRef, pipelineSpec))
		} else {
			// check for taskRef and taskSpec with beta/stable feature flag
			errs = errs.Also(apis.ErrMissingOneOf(taskRef, taskSpec))
		}
	}
	return errs
}
// validateCustomTask validates custom task specifications - checking kind and fail if not yet supported features specified
func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) {
	// The checks run in the original order so the aggregated error output is
	// unchanged: ref.kind, spec.kind, ref.apiVersion, spec.apiVersion.
	checks := []struct {
		failed bool
		msg    string
		path   string
	}{
		{pt.TaskRef != nil && pt.TaskRef.Kind == "", "custom task ref must specify kind", "taskRef.kind"},
		{pt.TaskSpec != nil && pt.TaskSpec.Kind == "", "custom task spec must specify kind", "taskSpec.kind"},
		{pt.TaskRef != nil && pt.TaskRef.APIVersion == "", "custom task ref must specify apiVersion", "taskRef.apiVersion"},
		{pt.TaskSpec != nil && pt.TaskSpec.APIVersion == "", "custom task spec must specify apiVersion", "taskSpec.apiVersion"},
	}
	for _, c := range checks {
		if c.failed {
			errs = errs.Also(apis.ErrInvalidValue(c.msg, c.path))
		}
	}
	return errs
}
// validateTask validates a pipeline task or a final task for taskRef and taskSpec
func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) {
	// Delegate to the embedded spec's own validation when present.
	if spec := pt.TaskSpec; spec != nil {
		errs = errs.Also(spec.Validate(ctx).ViaField("taskSpec"))
	}
	// Delegate to the reference's own validation when present.
	if ref := pt.TaskRef; ref != nil {
		errs = errs.Also(ref.Validate(ctx).ViaField("taskRef"))
	}
	return errs
}
// validatePipelineWorkspacesDeclarations validates the specified workspaces, ensuring having unique name without any
// empty string,
func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) (errs *apis.FieldError) {
	// Workspace names must be non-empty and unique. Both checks run
	// independently so a duplicated empty name reports both problems.
	seen := sets.NewString()
	for i, ws := range wss {
		if ws.Name == "" {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("workspace %d has empty name", i),
				"").ViaFieldIndex("workspaces", i))
		}
		if seen.Has(ws.Name) {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("workspace with name %q appears more than once", ws.Name),
				"").ViaFieldIndex("workspaces", i))
		}
		seen.Insert(ws.Name)
	}
	return errs
}
// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline
func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) {
	// Params used in embedded task steps must be declared by the embedded task
	// or by the pipeline itself.
	errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "tasks"))
	errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "finally"))
	// Params referenced in task params/matrix/when must be declared by the pipeline.
	errs = errs.Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks"))
	errs = errs.Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally"))
	return errs
}
// validatePipelineTaskParameterUsage validates that parameters referenced in the Pipeline Tasks are declared by the Pipeline
func validatePipelineTaskParameterUsage(tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) {
	allParamNames := sets.NewString(params.getNames()...)
	// Array params need special handling for indexed/star references.
	_, arrayParams, objectParams := params.sortByType()
	arrayParamNames := sets.NewString(arrayParams.getNames()...)
	// Map each object param name to the keys it declares so that
	// $(params.<obj>.<key>) references can be validated key-by-key.
	objectParameterNameKeys := map[string][]string{}
	for _, p := range objectParams {
		for k := range p.Properties {
			objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k)
		}
	}
	errs = errs.Also(validatePipelineParametersVariables(tasks, "params", allParamNames, arrayParamNames, objectParameterNameKeys))
	// Also reject duplicate param names within each task's own param list.
	for i, task := range tasks {
		errs = errs.Also(task.Params.validateDuplicateParameters().ViaField("params").ViaIndex(i))
	}
	return errs
}
// validatePipelineWorkspacesUsage validates that Workspaces referenced in the Pipeline are declared by the Pipeline
func (ps *PipelineSpec) validatePipelineWorkspacesUsage() *apis.FieldError {
	// Check both the dag and finally sections against the declared workspaces.
	errs := validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")
	return errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally"))
}
// validatePipelineTasksWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in
// the pipeline
func validatePipelineTasksWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) {
	declared := sets.NewString()
	for i := range wss {
		declared.Insert(wss[i].Name)
	}
	// Any workspaces used in PipelineTasks should have their name declared in the Pipeline's Workspaces list.
	for i := range pts {
		errs = errs.Also(pts[i].validateWorkspaces(declared).ViaIndex(i))
	}
	return errs
}
// ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task,
// (1) it validates the type of parameter is either string or array (2) parameter default value matches
// with the type of that param (3) no duplication, feature flag and allowed param type when using param enum
func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) {
	// validates all the types within a slice of ParamSpecs
	errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params"))
	errs = errs.Also(params.validateNoDuplicateNames())
	// Param enums are feature-gated and type-restricted.
	errs = errs.Also(params.validateParamEnums(ctx).ViaField("params"))
	// Reject duplicate param names within each task's own param list.
	for i, task := range tasks {
		errs = errs.Also(task.Params.validateDuplicateParameters().ViaField("params").ViaIndex(i))
	}
	return errs
}
// validatePipelineParametersVariables validates $(<prefix>.<name>) references in each
// task's params, matrix params and when expressions against the declared param names.
func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	for idx, task := range tasks {
		errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx))
		// Matrix params only exist when the task is matrixed.
		if task.IsMatrixed() {
			errs = errs.Also(task.Matrix.validatePipelineParametersVariablesInMatrixParameters(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx))
		}
		errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx))
	}
	return errs
}
// validatePipelineContextVariables validates that context variable references in the
// param values of ALL given tasks use only the supported fields:
// $(context.pipelineRun.name|namespace|uid), $(context.pipeline.name) and
// $(context.pipelineTask.retries).
func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError {
	pipelineRunContextNames := sets.NewString().Insert(
		"name",
		"namespace",
		"uid",
	)
	pipelineContextNames := sets.NewString().Insert(
		"name",
	)
	pipelineTaskContextNames := sets.NewString().Insert(
		"retries",
	)
	var paramValues []string
	for _, task := range tasks {
		// BUG FIX: accumulate values from every task. The previous code assigned
		// (paramValues = ...) inside the loop, overwriting the slice each
		// iteration so only the LAST task's param values were ever validated.
		paramValues = append(paramValues, task.extractAllParams().extractValues()...)
	}
	errs := validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineRun", pipelineRunContextNames).
		Also(validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipeline", pipelineContextNames)).
		Also(validatePipelineContextVariablesInParamValues(paramValues, "context\\.pipelineTask", pipelineTaskContextNames))
	return errs
}
// extractAllParams extracts all the parameters in a PipelineTask:
// - pt.Params
// - pt.Matrix.Params
// - pt.Matrix.Include.Params
func (pt *PipelineTask) extractAllParams() Params {
	// NOTE(review): allParams starts as a view over pt.Params, so the appends
	// below may write into pt.Params' spare capacity. Callers appear to only
	// read the result, so this aliasing looks benign — confirm before mutating
	// the returned slice.
	allParams := pt.Params
	if pt.Matrix.HasParams() {
		allParams = append(allParams, pt.Matrix.Params...)
	}
	if pt.Matrix.HasInclude() {
		for _, include := range pt.Matrix.Include {
			allParams = append(allParams, include.Params...)
		}
	}
	return allParams
}
// containsExecutionStatusRef checks if a specified param has a reference to execution status or reason
// $(tasks.<task-name>.status), $(tasks.status), or $(tasks.<task-name>.reason)
func containsExecutionStatusRef(p string) bool {
	// Must be rooted at "tasks." and end in one of the two status-ish fields.
	if !strings.HasPrefix(p, "tasks.") {
		return false
	}
	return strings.HasSuffix(p, ".status") || strings.HasSuffix(p, ".reason")
}
// validateExecutionStatusVariables checks status references in both sections:
// dag tasks may not use them at all; finally tasks may only reference dag tasks.
func validateExecutionStatusVariables(tasks []PipelineTask, finallyTasks []PipelineTask) *apis.FieldError {
	dagErrs := validateExecutionStatusVariablesInTasks(tasks).ViaField("tasks")
	finallyErrs := validateExecutionStatusVariablesInFinally(PipelineTaskList(tasks).Names(), finallyTasks).ViaField("finally")
	return dagErrs.Also(finallyErrs)
}
// validate dag pipeline tasks, task params can not access execution status of any other task
// dag tasks cannot have param value as $(tasks.pipelineTask.status)
func validateExecutionStatusVariablesInTasks(tasks []PipelineTask) (errs *apis.FieldError) {
	for i := range tasks {
		errs = errs.Also(tasks[i].validateExecutionStatusVariablesDisallowed().ViaIndex(i))
	}
	return errs
}
// validate finally tasks accessing execution status of a dag task specified in the pipeline
// $(tasks.pipelineTask.status) is invalid if pipelineTask is not defined as a dag task
func validateExecutionStatusVariablesInFinally(tasksNames sets.String, finally []PipelineTask) (errs *apis.FieldError) {
	for i := range finally {
		errs = errs.Also(finally[i].validateExecutionStatusVariablesAllowed(tasksNames).ViaIndex(i))
	}
	return errs
}
// validateExecutionStatusVariablesDisallowed reports an error if any of this (dag)
// task's params or when expressions reference $(tasks.<name>.status),
// $(tasks.<name>.reason) or the aggregate $(tasks.status).
func (pt *PipelineTask) validateExecutionStatusVariablesDisallowed() (errs *apis.FieldError) {
	for _, param := range pt.Params {
		if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok {
			errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "value").
				ViaFieldKey("params", param.Name))
		}
	}
	// When expressions are checked the same way, addressed by index.
	for i, we := range pt.WhenExpressions {
		if expressions, ok := we.GetVarSubstitutionExpressions(); ok {
			errs = errs.Also(validateContainsExecutionStatusVariablesDisallowed(expressions, "").
				ViaFieldIndex("when", i))
		}
	}
	return errs
}
// validateContainsExecutionStatusVariablesDisallowed converts a detected status
// reference into a FieldError at the given path; nil when none is present.
func validateContainsExecutionStatusVariablesDisallowed(expressions []string, path string) *apis.FieldError {
	if !containsExecutionStatusReferences(expressions) {
		return nil
	}
	return apis.ErrInvalidValue("pipeline tasks can not refer to execution status"+
		" of any other pipeline task or aggregate status of tasks", path)
}
// containsExecutionStatusReferences reports whether any expression references a
// task's execution status ($(tasks.<name>.status)/.reason) or the aggregate
// $(tasks.status), ignoring anything that looks like a result reference.
func containsExecutionStatusReferences(expressions []string) bool {
	// Result references win: tasks.<name>.results.* is not a status reference.
	if LooksLikeContainsResultRefs(expressions) {
		return false
	}
	for _, e := range expressions {
		if containsExecutionStatusRef(e) {
			return true
		}
	}
	return false
}
// validateExecutionStatusVariablesAllowed validates this (finally) task's params and
// when expressions: status references are permitted, but must name a dag task that
// exists in ptNames.
func (pt *PipelineTask) validateExecutionStatusVariablesAllowed(ptNames sets.String) (errs *apis.FieldError) {
	for _, param := range pt.Params {
		if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok {
			errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "value").
				ViaFieldKey("params", param.Name))
		}
	}
	// When expressions are checked the same way, addressed by index.
	for i, we := range pt.WhenExpressions {
		if expressions, ok := we.GetVarSubstitutionExpressions(); ok {
			errs = errs.Also(validateExecutionStatusVariablesExpressions(expressions, ptNames, "").
				ViaFieldIndex("when", i))
		}
	}
	return errs
}
// validateExecutionStatusVariablesExpressions validates status/reason references in a
// finally task's expressions: each $(tasks.<name>.status|reason) must name a dag task
// present in ptNames; the aggregate $(tasks.status) is always allowed.
func validateExecutionStatusVariablesExpressions(expressions []string, ptNames sets.String, fieldPath string) (errs *apis.FieldError) {
	// validate tasks.pipelineTask.status if this expression is not a result reference
	if !LooksLikeContainsResultRefs(expressions) {
		for _, expression := range expressions {
			// its a reference to aggregate status of dag tasks - $(tasks.status)
			if expression == PipelineTasksAggregateStatus {
				continue
			}
			// check if it contains context variable accessing execution status - $(tasks.taskname.status) | $(tasks.taskname.reason)
			if containsExecutionStatusRef(expression) {
				var pt string
				if strings.HasSuffix(expression, ".status") {
					// strip tasks. and .status from tasks.taskname.status to further verify task name
					pt = strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status")
				}
				if strings.HasSuffix(expression, ".reason") {
					// strip tasks. and .reason from tasks.taskname.reason to further verify task name
					pt = strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".reason")
				}
				// report an error if the task name does not exist in the list of dag tasks
				if !ptNames.Has(pt) {
					errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", pt), fieldPath))
				}
			}
		}
	}
	return errs
}
// validatePipelineContextVariablesInParamValues checks every param value for
// references to unknown variables under the given context prefix.
func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) {
	for i := range paramValues {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(paramValues[i], prefix, contextNames).ViaField("value"))
	}
	return errs
}
func filter(arr []string, cond func(string) bool) []string {
result := []string{}
for i := range arr {
if cond(arr[i]) {
result = append(result, arr[i])
}
}
return result
}
// validatePipelineResults ensure that pipeline result variables are properly configured
func validatePipelineResults(results []PipelineResult, tasks []PipelineTask, finally []PipelineTask) (errs *apis.FieldError) {
	pipelineTaskNames := getPipelineTasksNames(tasks)
	pipelineFinallyTaskNames := getPipelineTasksNames(finally)
	for idx, result := range results {
		expressions, ok := GetVarSubstitutionExpressionsForPipelineResult(result)
		// A pipeline result must contain at least one variable expression.
		if !ok {
			errs = errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but no expressions were found",
				"value").ViaFieldIndex("results", idx))
		}
		// At least one expression must be shaped like a task result reference.
		if !LooksLikeContainsResultRefs(expressions) {
			errs = errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but an invalid expressions was found",
				"value").ViaFieldIndex("results", idx))
		}
		// Every result-shaped expression must parse into a valid ResultRef;
		// a count mismatch means at least one failed to parse.
		expressions = filter(expressions, resultref.LooksLikeResultRef)
		resultRefs := NewResultRefs(expressions)
		if len(expressions) != len(resultRefs) {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs),
				"value").ViaFieldIndex("results", idx))
		}
		// The referenced task must exist in tasks (or finally, respectively).
		if !taskContainsResult(result.Value.StringVal, pipelineTaskNames, pipelineFinallyTaskNames) {
			errs = errs.Also(apis.ErrInvalidValue("referencing a nonexistent task",
				"value").ViaFieldIndex("results", idx))
		}
	}
	return errs
}
// getPipelineTasksNames collects the names of the given pipeline tasks into a set.
func getPipelineTasksNames(pipelineTasks []PipelineTask) sets.String {
	names := sets.NewString()
	for i := range pipelineTasks {
		names.Insert(pipelineTasks[i].Name)
	}
	return names
}
// taskContainsResult ensures the result value is referenced within the
// task names
func taskContainsResult(resultExpression string, pipelineTaskNames sets.String, pipelineFinallyTaskNames sets.String) bool {
	// split in case of multiple resultExpressions in the same result.Value string
	// i.e "$(task.<task-name).result.<result-name>) - $(task2.<task2-name).result2.<result2-name>)"
	split := strings.Split(resultExpression, "$")
	for _, expression := range split {
		if expression != "" {
			// Re-attach the "$" consumed by Split, then unwrap the "$( ... )".
			value := stripVarSubExpression("$" + expression)
			pr, err := resultref.ParseTaskExpression(value)
			if err != nil {
				return false
			}
			// "tasks.<name>..." must name a dag task.
			if strings.HasPrefix(value, "tasks") && !pipelineTaskNames.Has(pr.ResourceName) {
				return false
			}
			// "finally.<name>..." must name a finally task.
			if strings.HasPrefix(value, "finally") && !pipelineFinallyTaskNames.Has(pr.ResourceName) {
				return false
			}
		}
	}
	return true
}
// validateTasksAndFinallySection rejects a spec whose finally section is
// populated while the tasks section is empty.
func validateTasksAndFinallySection(ps *PipelineSpec) *apis.FieldError {
	if len(ps.Tasks) > 0 || len(ps.Finally) == 0 {
		return nil
	}
	return apis.ErrInvalidValue(fmt.Sprintf("spec.tasks is empty but spec.finally has %d tasks", len(ps.Finally)), "finally")
}
// validateFinalTasks checks the finally section: runAfter is disallowed there
// (finally tasks always run last), and result references must target dag tasks.
func validateFinalTasks(tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
	for idx := range finalTasks {
		if len(finalTasks[idx].RunAfter) != 0 {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("no runAfter allowed under spec.finally, final task %s has runAfter specified", finalTasks[idx].Name), "").ViaFieldIndex("finally", idx))
		}
	}
	dagNames := PipelineTaskList(tasks).Names()
	finallyNames := PipelineTaskList(finalTasks).Names()
	return errs.Also(validateTaskResultReferenceInFinallyTasks(finalTasks, dagNames, finallyNames))
}
// validateTaskResultReferenceInFinallyTasks checks result references in the params and
// when expressions of every finally task: they must target dag tasks (ts) and must not
// target other finally tasks (fts).
func validateTaskResultReferenceInFinallyTasks(finalTasks []PipelineTask, ts sets.String, fts sets.String) (errs *apis.FieldError) {
	for idx, t := range finalTasks {
		for _, p := range t.Params {
			if expressions, ok := GetVarSubstitutionExpressionsForParam(p); ok {
				errs = errs.Also(validateResultsVariablesExpressionsInFinally(expressions, ts, fts, "value").ViaFieldKey(
					"params", p.Name).ViaFieldIndex("finally", idx))
			}
		}
		// When expressions get the same check, addressed by index.
		for i, we := range t.WhenExpressions {
			if expressions, ok := we.GetVarSubstitutionExpressions(); ok {
				errs = errs.Also(validateResultsVariablesExpressionsInFinally(expressions, ts, fts, "").ViaFieldIndex(
					"when", i).ViaFieldIndex("finally", idx))
			}
		}
	}
	return errs
}
// validateResultsVariablesExpressionsInFinally validates result references found in a
// finally task: the referenced task must be a dag task, never another finally task.
func validateResultsVariablesExpressionsInFinally(expressions []string, pipelineTasksNames sets.String, finalTasksNames sets.String, fieldPath string) (errs *apis.FieldError) {
	if !LooksLikeContainsResultRefs(expressions) {
		return nil
	}
	for _, ref := range NewResultRefs(expressions) {
		pt := ref.PipelineTask
		switch {
		case finalTasksNames.Has(pt):
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid task result reference, "+
				"final task has task result reference from a final task %s", pt), fieldPath))
		case !pipelineTasksNames.Has(pt):
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid task result reference, "+
				"final task has task result reference from a task %s which is not defined in the pipeline", pt), fieldPath))
		}
	}
	return errs
}
// validateWhenExpressions validates the when expressions of both dag and finally tasks.
func validateWhenExpressions(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
	// Both sections get the same check; only the error-path field name differs.
	for _, group := range []struct {
		field string
		list  []PipelineTask
	}{{"tasks", tasks}, {"finally", finalTasks}} {
		for i := range group.list {
			errs = errs.Also(group.list[i].WhenExpressions.validate(ctx).ViaFieldIndex(group.field, i))
		}
	}
	return errs
}
// validateGraph ensures the Pipeline's dependency graph (DAG) makes sense: tasks only
// depend on tasks that can run before them and there are no dependency cycles.
func validateGraph(tasks []PipelineTask) (errs *apis.FieldError) {
	// dag.Build fails on cycles and on dependencies referencing unknown tasks;
	// the underlying error text is surfaced as-is on the "tasks" field.
	if _, err := dag.Build(PipelineTaskList(tasks), PipelineTaskList(tasks).Deps()); err != nil {
		errs = errs.Also(apis.ErrInvalidValue(err.Error(), "tasks"))
	}
	return errs
}
// validateMatrix validates the matrix section of each task and the consumption of
// matrix-produced results across tasks.
func validateMatrix(ctx context.Context, tasks []PipelineTask) (errs *apis.FieldError) {
	for i := range tasks {
		errs = errs.Also(tasks[i].validateMatrix(ctx).ViaIndex(i))
	}
	return errs.Also(validateTaskResultsFromMatrixedPipelineTasksConsumed(tasks))
}
// findAndValidateResultRefsForMatrix checks that any result references to Matrixed PipelineTasks if consumed
// by another PipelineTask that the entire array of results produced by a matrix is consumed in aggregate
// since consuming a singular result produced by a matrix is currently not supported
func findAndValidateResultRefsForMatrix(tasks []PipelineTask, taskMapping map[string]PipelineTask) (resultRefs []*ResultRef, errs *apis.FieldError) {
	for _, t := range tasks {
		for _, p := range t.Params {
			if expressions, ok := GetVarSubstitutionExpressionsForParam(p); ok {
				if LooksLikeContainsResultRefs(expressions) {
					// NOTE(review): resultRefs is overwritten on every matching param,
					// so only the refs from the LAST result-bearing param reach the
					// caller's string-result check — confirm whether this should
					// accumulate with append instead.
					resultRefs, errs = validateMatrixedPipelineTaskConsumed(expressions, taskMapping)
					// Bail out on the first invalid consumption.
					if errs != nil {
						return nil, errs
					}
				}
			}
		}
	}
	return resultRefs, errs
}
// validateMatrixedPipelineTaskConsumed checks that any Matrixed Pipeline Task that is being consumed
// is consumed in aggregate [*] since consuming a singular result produced by a matrix is currently not supported.
// It returns the ResultRefs for the matrix-consuming expressions alongside any errors.
func validateMatrixedPipelineTaskConsumed(expressions []string, taskMapping map[string]PipelineTask) (resultRefs []*ResultRef, errs *apis.FieldError) {
	var filteredExpressions []string
	for _, expression := range expressions {
		// if it is not matrix result ref expression, skip
		if !resultref.LooksLikeResultRef(expression) {
			continue
		}
		// ie. "tasks.<pipelineTaskName>.results.<resultName>[*]"
		subExpressions := strings.Split(expression, ".")
		// Defensive: LooksLikeResultRef should guarantee at least a
		// "tasks.<name>..." shape, but guard the index rather than panic on a
		// malformed expression.
		if len(subExpressions) < 2 {
			continue
		}
		pipelineTask := subExpressions[1] // pipelineTaskName
		// Missing names yield the zero PipelineTask, whose IsMatrixed() is false.
		taskConsumed := taskMapping[pipelineTask]
		if taskConsumed.IsMatrixed() {
			if !strings.HasSuffix(expression, "[*]") {
				errs = errs.Also(apis.ErrGeneric("A matrixed pipelineTask can only be consumed in aggregate using [*] notation, but is currently set to " + expression))
			}
			filteredExpressions = append(filteredExpressions, expression)
		}
	}
	return NewResultRefs(filteredExpressions), errs
}
// validateTaskResultsFromMatrixedPipelineTasksConsumed checks that any matrixed
// PipelineTask being consumed is consumed in aggregate [*], since consuming a
// singular result produced by a matrix is currently not supported.
// It also validates that a matrix emitting results can only emit results with the
// underlying type string if those results are consumed by another PipelineTask.
func validateTaskResultsFromMatrixedPipelineTasksConsumed(tasks []PipelineTask) (errs *apis.FieldError) {
	taskMapping := createTaskMapping(tasks)
	resultRefs, refErrs := findAndValidateResultRefsForMatrix(tasks, taskMapping)
	if refErrs != nil {
		return refErrs
	}
	return errs.Also(validateMatrixEmittingStringResults(resultRefs, taskMapping))
}
// createTaskMapping maps each PipelineTask's name to the task itself so a
// pipelineTask can be looked up by name in constant time.
//
// Fixed: the previous version declared a named return value (taskMap) that was
// never used — misleading and flagged by linters. The map is also pre-sized now.
func createTaskMapping(tasks []PipelineTask) map[string]PipelineTask {
	taskMapping := make(map[string]PipelineTask, len(tasks))
	for _, task := range tasks {
		taskMapping[task.Name] = task
	}
	return taskMapping
}
// validateMatrixEmittingStringResults checks a matrix emitting results can only emit results with the underlying type string
// if those results are being consumed by another PipelineTask.
func validateMatrixEmittingStringResults(resultRefs []*ResultRef, taskMapping map[string]PipelineTask) (errs *apis.FieldError) {
	for _, resultRef := range resultRefs {
		// Look up the producing pipelineTask for this reference.
		task := taskMapping[resultRef.PipelineTask]
		resultName := resultRef.Result
		if task.TaskRef != nil {
			referencedTaskName := task.TaskRef.Name
			// NOTE(review): taskMapping is keyed by pipelineTask name, but the key used
			// here is the referenced Task's name; the lookup only succeeds when a
			// pipelineTask happens to share the name of the Task it references — confirm
			// this is the intended behavior.
			referencedTask := taskMapping[referencedTaskName]
			if referencedTask.TaskSpec != nil {
				errs = errs.Also(validateStringResults(referencedTask.TaskSpec.Results, resultName))
			}
		} else if task.TaskSpec != nil {
			// Inline taskSpec: validate its declared results directly.
			errs = errs.Also(validateStringResults(task.TaskSpec.Results, resultName))
		}
	}
	return errs
}
// validateStringResults ensures that the result named resultName, if declared in
// results, has the underlying type string; any other type is rejected.
func validateStringResults(results []TaskResult, resultName string) (errs *apis.FieldError) {
	for i := range results {
		if results[i].Name != resultName {
			continue
		}
		if results[i].Type != ResultsTypeString {
			errs = errs.Also(apis.ErrInvalidValue(
				fmt.Sprintf("Matrixed PipelineTasks emitting results must have an underlying type string, but result %s has type %s in pipelineTask", resultName, string(results[i].Type)),
				"",
			))
		}
	}
	return errs
}
// validateArtifactReference ensures the enable-artifacts feature flag is set to
// true whenever a task (or finally task) parameter value references an artifact.
// Returns nil when the flag is enabled or no artifact reference is present.
//
// Improvement: the two previously duplicated loops over tasks and finalTasks are
// folded into one local closure parameterized by the field name.
func validateArtifactReference(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
		return errs
	}
	// check returns a FieldError for the first param value in list that contains
	// an artifact reference, attributed to field[i].params, or nil when none do.
	check := func(field string, list []PipelineTask) *apis.FieldError {
		for i, t := range list {
			for _, v := range t.Params.extractValues() {
				if len(artifactref.TaskArtifactRegex.FindAllStringSubmatch(v, -1)) > 0 {
					return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "").ViaField("params").ViaFieldIndex(field, i)
				}
			}
		}
		return nil
	}
	if err := check("tasks", tasks); err != nil {
		return errs.Also(err)
	}
	return errs.Also(check("finally", finalTasks))
}
// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters
// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks.
// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])",
// this would be one of the strings returned.
func (ps *PipelineSpec) GetIndexingReferencesToArrayParams() sets.String {
	paramsRefs := []string{}
	// Collect every candidate string from the main tasks: params, matrix params,
	// workspace subPaths and when-expression inputs/values.
	for i := range ps.Tasks {
		paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...)
		if ps.Tasks[i].IsMatrixed() {
			paramsRefs = append(paramsRefs, ps.Tasks[i].Matrix.Params.extractValues()...)
		}
		for j := range ps.Tasks[i].Workspaces {
			paramsRefs = append(paramsRefs, ps.Tasks[i].Workspaces[j].SubPath)
		}
		for _, wes := range ps.Tasks[i].WhenExpressions {
			paramsRefs = append(paramsRefs, wes.Input)
			paramsRefs = append(paramsRefs, wes.Values...)
		}
	}
	// Same collection for the finally tasks.
	// NOTE(review): unlike the Tasks loop above, workspace SubPath values of
	// finally tasks are not scanned here — confirm whether that is intentional.
	for i := range ps.Finally {
		paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...)
		if ps.Finally[i].IsMatrixed() {
			paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...)
		}
		for _, wes := range ps.Finally[i].WhenExpressions {
			paramsRefs = append(paramsRefs, wes.Input)
			paramsRefs = append(paramsRefs, wes.Values...)
		}
	}
	// extract all array indexing references, for example []{"$(params.array-params[1])"}
	arrayIndexParamRefs := []string{}
	for _, p := range paramsRefs {
		arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...)
	}
	return sets.NewString(arrayIndexParamRefs...)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo copies this v1beta1 PipelineRef onto the v1 sink.
func (pr PipelineRef) convertTo(ctx context.Context, sink *v1.PipelineRef) {
	sink.Name = pr.Name
	sink.APIVersion = pr.APIVersion
	// Convert the embedded resolver reference; avoid shadowing the `new` builtin.
	resolverRef := v1.ResolverRef{}
	pr.ResolverRef.convertTo(ctx, &resolverRef)
	sink.ResolverRef = resolverRef
}
// convertFrom populates this v1beta1 PipelineRef from the v1 source.
func (pr *PipelineRef) convertFrom(ctx context.Context, source v1.PipelineRef) {
	pr.Name = source.Name
	pr.APIVersion = source.APIVersion
	resolverRef := ResolverRef{}
	resolverRef.convertFrom(ctx, source.ResolverRef)
	pr.ResolverRef = resolverRef
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
// Validate ensures that a supplied PipelineRef field is populated
// correctly. No errors are returned for a nil PipelineRef.
func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) {
	if ref == nil {
		return errs
	}
	// The bundle field may persist on existing objects but must not be set on
	// newly created ones.
	if apis.IsInCreate(ctx) && ref.Bundle != "" {
		errs = errs.Also(apis.ErrDisallowedFields("bundle"))
	}
	switch {
	// Remote resolution: a resolver and/or resolver params are set.
	case ref.Resolver != "" || ref.Params != nil:
		if ref.Params != nil {
			// Resolver params are a beta API field.
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
			// Params are mutually exclusive with a plain name, and require a resolver.
			if ref.Name != "" {
				errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
			}
			if ref.Resolver == "" {
				errs = errs.Also(apis.ErrMissingField("resolver"))
			}
			errs = errs.Also(ValidateParameters(ctx, ref.Params))
		}
		if ref.Resolver != "" {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
			if ref.Name != "" {
				// make sure that the name is url-like.
				err := RefNameLikeUrl(ref.Name)
				if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
					// If name is url-like then concise resolver syntax must be enabled
					errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
				}
				if err != nil {
					errs = errs.Also(apis.ErrInvalidValue(err, "name"))
				}
			}
		}
	case ref.Name != "":
		// ref name can be a Url-like format.
		if err := RefNameLikeUrl(ref.Name); err == nil {
			// If name is url-like then concise resolver syntax must be enabled
			if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
			}
			// In stage1 of concise remote resolvers syntax, this is a required field.
			// TODO: remove this check when implementing stage 2 where this is optional.
			if ref.Resolver == "" {
				errs = errs.Also(apis.ErrMissingField("resolver"))
			}
			// Or, it must be a valid k8s name
		} else {
			// ref name must be a valid k8s name
			if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
				errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
			}
		}
	default:
		// Neither resolver, params, nor name given.
		errs = errs.Also(apis.ErrMissingField("name"))
	}
	return //nolint:nakedret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/version"
"knative.dev/pkg/apis"
)
var _ apis.Convertible = (*PipelineRun)(nil)
// ConvertTo implements apis.Convertible
func (pr *PipelineRun) ConvertTo(ctx context.Context, to apis.Convertible) error {
	// Nothing to convert while the object is being deleted.
	if apis.IsInDelete(ctx) {
		return nil
	}
	switch sink := to.(type) {
	case *v1.PipelineRun:
		sink.ObjectMeta = pr.ObjectMeta
		// The deprecated Resources field has no v1 counterpart; stash it in an
		// annotation so ConvertFrom can restore it.
		if err := serializePipelineRunResources(&sink.ObjectMeta, &pr.Spec); err != nil {
			return err
		}
		if err := pr.Status.convertTo(ctx, &sink.Status, &sink.ObjectMeta); err != nil {
			return err
		}
		return pr.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta)
	default:
		return fmt.Errorf("unknown version, got: %T", sink)
	}
}
// ConvertTo implements apis.Convertible
func (prs PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1.PipelineRunSpec, meta *metav1.ObjectMeta) error {
	if prs.PipelineRef != nil {
		sink.PipelineRef = &v1.PipelineRef{}
		prs.PipelineRef.convertTo(ctx, sink.PipelineRef)
	}
	if prs.PipelineSpec != nil {
		sink.PipelineSpec = &v1.PipelineSpec{}
		if err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec, meta); err != nil {
			return err
		}
	}
	sink.Params = nil
	for _, p := range prs.Params {
		param := v1.Param{}
		p.convertTo(ctx, &param)
		sink.Params = append(sink.Params, param)
	}
	sink.Status = v1.PipelineRunSpecStatus(prs.Status)
	if prs.Timeouts != nil {
		sink.Timeouts = &v1.TimeoutFields{}
		prs.Timeouts.convertTo(ctx, sink.Timeouts)
	}
	// The deprecated single Timeout field replaces the whole Timeouts struct
	// when set (it is assigned after, so it wins).
	if prs.Timeout != nil {
		sink.Timeouts = &v1.TimeoutFields{}
		sink.Timeouts.Pipeline = prs.Timeout
	}
	sink.TaskRunTemplate = v1.PipelineTaskRunTemplate{}
	sink.TaskRunTemplate.PodTemplate = prs.PodTemplate
	sink.TaskRunTemplate.ServiceAccountName = prs.ServiceAccountName
	sink.Workspaces = nil
	for _, w := range prs.Workspaces {
		binding := v1.WorkspaceBinding{}
		w.convertTo(ctx, &binding)
		sink.Workspaces = append(sink.Workspaces, binding)
	}
	sink.TaskRunSpecs = nil
	for _, trs := range prs.TaskRunSpecs {
		spec := v1.PipelineTaskRunSpec{}
		trs.convertTo(ctx, &spec)
		sink.TaskRunSpecs = append(sink.TaskRunSpecs, spec)
	}
	return nil
}
// ConvertFrom implements apis.Convertible
func (pr *PipelineRun) ConvertFrom(ctx context.Context, from apis.Convertible) error {
	switch source := from.(type) {
	case *v1.PipelineRun:
		pr.ObjectMeta = source.ObjectMeta
		// Restore the deprecated Resources field stashed in an annotation by ConvertTo.
		if err := deserializePipelineRunResources(&pr.ObjectMeta, &pr.Spec); err != nil {
			return err
		}
		if err := pr.Status.convertFrom(ctx, &source.Status, &pr.ObjectMeta); err != nil {
			return err
		}
		return pr.Spec.ConvertFrom(ctx, &source.Spec, &pr.ObjectMeta)
	default:
		// Fixed: report the type of the unsupported *input* (source), not the
		// receiver — previously this printed the destination type, which made
		// the error message useless for diagnosing what was actually passed in.
		return fmt.Errorf("unknown version, got: %T", source)
	}
}
// ConvertFrom implements apis.Convertible
func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.PipelineRunSpec, meta *metav1.ObjectMeta) error {
	if source.PipelineRef != nil {
		ref := PipelineRef{}
		ref.convertFrom(ctx, *source.PipelineRef)
		prs.PipelineRef = &ref
	}
	if source.PipelineSpec != nil {
		spec := PipelineSpec{}
		if err := spec.ConvertFrom(ctx, source.PipelineSpec, meta); err != nil {
			return err
		}
		prs.PipelineSpec = &spec
	}
	prs.Params = nil
	for _, p := range source.Params {
		param := Param{}
		param.ConvertFrom(ctx, p)
		prs.Params = append(prs.Params, param)
	}
	// ServiceAccountName and PodTemplate live on the v1 TaskRunTemplate.
	prs.ServiceAccountName = source.TaskRunTemplate.ServiceAccountName
	prs.Status = PipelineRunSpecStatus(source.Status)
	if source.Timeouts != nil {
		timeouts := &TimeoutFields{}
		timeouts.convertFrom(ctx, *source.Timeouts)
		prs.Timeouts = timeouts
	}
	prs.PodTemplate = source.TaskRunTemplate.PodTemplate
	prs.Workspaces = nil
	for _, w := range source.Workspaces {
		binding := WorkspaceBinding{}
		binding.ConvertFrom(ctx, w)
		prs.Workspaces = append(prs.Workspaces, binding)
	}
	prs.TaskRunSpecs = nil
	for _, trs := range source.TaskRunSpecs {
		spec := PipelineTaskRunSpec{}
		spec.convertFrom(ctx, trs)
		prs.TaskRunSpecs = append(prs.TaskRunSpecs, spec)
	}
	return nil
}
// convertTo copies every timeout field onto the v1 sink.
func (tf TimeoutFields) convertTo(ctx context.Context, sink *v1.TimeoutFields) {
	sink.Pipeline, sink.Tasks, sink.Finally = tf.Pipeline, tf.Tasks, tf.Finally
}
// convertFrom populates every timeout field from the v1 source.
func (tf *TimeoutFields) convertFrom(ctx context.Context, source v1.TimeoutFields) {
	tf.Pipeline, tf.Tasks, tf.Finally = source.Pipeline, source.Tasks, source.Finally
}
// convertTo copies this per-task run spec onto the v1 sink. Note the renames:
// v1beta1 "overrides" become v1 "specs".
func (ptrs PipelineTaskRunSpec) convertTo(ctx context.Context, sink *v1.PipelineTaskRunSpec) {
	sink.PipelineTaskName = ptrs.PipelineTaskName
	sink.ServiceAccountName = ptrs.TaskServiceAccountName
	sink.PodTemplate = ptrs.TaskPodTemplate
	sink.StepSpecs = nil
	for _, override := range ptrs.StepOverrides {
		stepSpec := v1.TaskRunStepSpec{}
		override.convertTo(ctx, &stepSpec)
		sink.StepSpecs = append(sink.StepSpecs, stepSpec)
	}
	sink.SidecarSpecs = nil
	for _, override := range ptrs.SidecarOverrides {
		sidecarSpec := v1.TaskRunSidecarSpec{}
		override.convertTo(ctx, &sidecarSpec)
		sink.SidecarSpecs = append(sink.SidecarSpecs, sidecarSpec)
	}
	if ptrs.Metadata != nil {
		sink.Metadata = &v1.PipelineTaskMetadata{}
		ptrs.Metadata.convertTo(ctx, sink.Metadata)
	}
	sink.ComputeResources = ptrs.ComputeResources
	sink.Timeout = ptrs.Timeout
}
// convertFrom populates this per-task run spec from the v1 source. Note the
// renames: v1 "specs" become v1beta1 "overrides".
func (ptrs *PipelineTaskRunSpec) convertFrom(ctx context.Context, source v1.PipelineTaskRunSpec) {
	ptrs.PipelineTaskName = source.PipelineTaskName
	ptrs.TaskServiceAccountName = source.ServiceAccountName
	ptrs.TaskPodTemplate = source.PodTemplate
	ptrs.StepOverrides = nil
	for _, stepSpec := range source.StepSpecs {
		override := TaskRunStepOverride{}
		override.convertFrom(ctx, stepSpec)
		ptrs.StepOverrides = append(ptrs.StepOverrides, override)
	}
	ptrs.SidecarOverrides = nil
	for _, sidecarSpec := range source.SidecarSpecs {
		override := TaskRunSidecarOverride{}
		override.convertFrom(ctx, sidecarSpec)
		ptrs.SidecarOverrides = append(ptrs.SidecarOverrides, override)
	}
	if source.Metadata != nil {
		metadata := PipelineTaskMetadata{}
		metadata.convertFrom(ctx, *source.Metadata)
		ptrs.Metadata = &metadata
	}
	ptrs.ComputeResources = source.ComputeResources
	ptrs.Timeout = source.Timeout
}
// convertTo copies this v1beta1 PipelineRunStatus onto the v1 sink, converting
// each nested collection element-by-element. meta is forwarded to the nested
// PipelineSpec conversion. Returns the first conversion error encountered.
func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRunStatus, meta *metav1.ObjectMeta) error {
	sink.Status = prs.Status
	sink.StartTime = prs.StartTime
	sink.CompletionTime = prs.CompletionTime
	// v1beta1 PipelineResults are called Results in v1.
	sink.Results = nil
	for _, pr := range prs.PipelineResults {
		new := v1.PipelineRunResult{}
		pr.convertTo(ctx, &new)
		sink.Results = append(sink.Results, new)
	}
	if prs.PipelineSpec != nil {
		sink.PipelineSpec = &v1.PipelineSpec{}
		err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec, meta)
		if err != nil {
			return err
		}
	}
	sink.SkippedTasks = nil
	for _, st := range prs.SkippedTasks {
		new := v1.SkippedTask{}
		st.convertTo(ctx, &new)
		sink.SkippedTasks = append(sink.SkippedTasks, new)
	}
	sink.ChildReferences = nil
	for _, cr := range prs.ChildReferences {
		new := v1.ChildStatusReference{}
		cr.convertTo(ctx, &new)
		sink.ChildReferences = append(sink.ChildReferences, new)
	}
	sink.FinallyStartTime = prs.FinallyStartTime
	if prs.Provenance != nil {
		new := v1.Provenance{}
		prs.Provenance.convertTo(ctx, &new)
		sink.Provenance = &new
	}
	return nil
}
// convertFrom populates this v1beta1 PipelineRunStatus from the v1 source,
// converting each nested collection element-by-element. meta is forwarded to
// the nested PipelineSpec conversion. Returns the first conversion error.
func (prs *PipelineRunStatus) convertFrom(ctx context.Context, source *v1.PipelineRunStatus, meta *metav1.ObjectMeta) error {
	prs.Status = source.Status
	prs.StartTime = source.StartTime
	prs.CompletionTime = source.CompletionTime
	// v1 Results are called PipelineResults in v1beta1.
	prs.PipelineResults = nil
	for _, pr := range source.Results {
		new := PipelineRunResult{}
		new.convertFrom(ctx, pr)
		prs.PipelineResults = append(prs.PipelineResults, new)
	}
	if source.PipelineSpec != nil {
		newPipelineSpec := PipelineSpec{}
		err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec, meta)
		if err != nil {
			return err
		}
		prs.PipelineSpec = &newPipelineSpec
	}
	prs.SkippedTasks = nil
	for _, st := range source.SkippedTasks {
		new := SkippedTask{}
		new.convertFrom(ctx, st)
		prs.SkippedTasks = append(prs.SkippedTasks, new)
	}
	prs.ChildReferences = nil
	for _, cr := range source.ChildReferences {
		new := ChildStatusReference{}
		new.convertFrom(ctx, cr)
		prs.ChildReferences = append(prs.ChildReferences, new)
	}
	prs.FinallyStartTime = source.FinallyStartTime
	if source.Provenance != nil {
		new := Provenance{}
		new.convertFrom(ctx, *source.Provenance)
		prs.Provenance = &new
	}
	return nil
}
// convertTo copies the result name and value onto the v1 sink.
func (prr PipelineRunResult) convertTo(ctx context.Context, sink *v1.PipelineRunResult) {
	sink.Name = prr.Name
	value := v1.ParamValue{}
	prr.Value.convertTo(ctx, &value)
	sink.Value = value
}
// convertFrom populates the result name and value from the v1 source.
func (prr *PipelineRunResult) convertFrom(ctx context.Context, source v1.PipelineRunResult) {
	prr.Name = source.Name
	value := ParamValue{}
	value.convertFrom(ctx, source.Value)
	prr.Value = value
}
// convertTo copies the skipped-task record onto the v1 sink.
func (st SkippedTask) convertTo(ctx context.Context, sink *v1.SkippedTask) {
	sink.Name = st.Name
	sink.Reason = v1.SkippingReason(st.Reason)
	sink.WhenExpressions = nil
	for _, expr := range st.WhenExpressions {
		converted := v1.WhenExpression{}
		expr.convertTo(ctx, &converted)
		sink.WhenExpressions = append(sink.WhenExpressions, converted)
	}
}
// convertFrom populates the skipped-task record from the v1 source.
func (st *SkippedTask) convertFrom(ctx context.Context, source v1.SkippedTask) {
	st.Name = source.Name
	st.Reason = SkippingReason(source.Reason)
	st.WhenExpressions = nil
	for _, expr := range source.WhenExpressions {
		converted := WhenExpression{}
		converted.convertFrom(ctx, expr)
		st.WhenExpressions = append(st.WhenExpressions, converted)
	}
}
// convertTo copies the child status reference onto the v1 sink.
func (csr ChildStatusReference) convertTo(ctx context.Context, sink *v1.ChildStatusReference) {
	sink.TypeMeta = csr.TypeMeta
	sink.Name = csr.Name
	sink.DisplayName = csr.DisplayName
	sink.PipelineTaskName = csr.PipelineTaskName
	sink.WhenExpressions = nil
	for _, expr := range csr.WhenExpressions {
		converted := v1.WhenExpression{}
		expr.convertTo(ctx, &converted)
		sink.WhenExpressions = append(sink.WhenExpressions, converted)
	}
}
// convertFrom populates the child status reference from the v1 source.
func (csr *ChildStatusReference) convertFrom(ctx context.Context, source v1.ChildStatusReference) {
	csr.TypeMeta = source.TypeMeta
	csr.Name = source.Name
	csr.DisplayName = source.DisplayName
	csr.PipelineTaskName = source.PipelineTaskName
	csr.WhenExpressions = nil
	for _, expr := range source.WhenExpressions {
		converted := WhenExpression{}
		converted.convertFrom(ctx, expr)
		csr.WhenExpressions = append(csr.WhenExpressions, converted)
	}
}
// serializePipelineRunResources stashes the deprecated Resources field into an
// annotation on meta so the round trip through v1 does not lose it. No-op when
// Resources is unset.
func serializePipelineRunResources(meta *metav1.ObjectMeta, spec *PipelineRunSpec) error {
	if spec.Resources != nil {
		return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey)
	}
	return nil
}
// deserializePipelineRunResources restores the deprecated Resources field from
// the annotation written by serializePipelineRunResources. spec.Resources is
// left untouched when the annotation yields no entries.
func deserializePipelineRunResources(meta *metav1.ObjectMeta, spec *PipelineRunSpec) error {
	resources := []PipelineResourceBinding{}
	if err := version.DeserializeFromMetadata(meta, &resources, resourcesAnnotationKey); err != nil {
		return err
	}
	if len(resources) > 0 {
		spec.Resources = resources
	}
	return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"regexp"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmap"
)
var (
	_ apis.Defaultable = (*PipelineRun)(nil)
	// filterReservedAnnotationRegexp matches annotation keys reserved by Tekton;
	// compiled once at package init, used by SetDefaults to drop such keys.
	filterReservedAnnotationRegexp = regexp.MustCompile(pipeline.TektonReservedAnnotationExpr)
)
// SetDefaults implements apis.Defaultable
func (pr *PipelineRun) SetDefaults(ctx context.Context) {
	pr.Spec.SetDefaults(ctx)
	if !apis.IsInCreate(ctx) {
		return
	}
	// Silently filter out Tekton-reserved annotations at creation time.
	isReserved := func(key string) bool {
		return filterReservedAnnotationRegexp.MatchString(key)
	}
	pr.ObjectMeta.Annotations = kmap.Filter(pr.ObjectMeta.Annotations, isReserved)
}
// SetDefaults implements apis.Defaultable
func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) {
	cfg := config.FromContextOrDefaults(ctx)
	// A ref with neither name nor resolver falls back to the configured default resolver.
	if prs.PipelineRef != nil && prs.PipelineRef.Name == "" && prs.PipelineRef.Resolver == "" {
		prs.PipelineRef.Resolver = ResolverName(cfg.Defaults.DefaultResolverType)
	}
	// Apply the default timeout to whichever timeout style the spec uses;
	// the two cases are mutually exclusive.
	defaultTimeout := time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute
	switch {
	case prs.Timeout == nil && prs.Timeouts == nil:
		prs.Timeout = &metav1.Duration{Duration: defaultTimeout}
	case prs.Timeouts != nil && prs.Timeouts.Pipeline == nil:
		prs.Timeouts.Pipeline = &metav1.Duration{Duration: defaultTimeout}
	}
	if sa := cfg.Defaults.DefaultServiceAccount; prs.ServiceAccountName == "" && sa != "" {
		prs.ServiceAccountName = sa
	}
	prs.PodTemplate = pod.MergePodTemplateWithDefault(prs.PodTemplate, cfg.Defaults.DefaultPodTemplate)
	if prs.PipelineSpec != nil {
		prs.PipelineSpec.SetDefaults(ctx)
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
apisconfig "github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// +genclient
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// PipelineRun represents a single execution of a Pipeline. PipelineRuns are how
// the graph of Tasks declared in a Pipeline are executed; they specify inputs
// to Pipelines such as parameter values and capture operational aspects of the
// Tasks execution such as service account and tolerations. Creating a
// PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.
//
// Deprecated: Please use v1.PipelineRun instead.
type PipelineRun struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the desired state of the PipelineRun.
	// +optional
	Spec PipelineRunSpec `json:"spec,omitempty"`
	// Status holds the observed state of the PipelineRun.
	// +optional
	Status PipelineRunStatus `json:"status,omitempty"`
}
// GetName returns the name of the PipelineRun from its embedded ObjectMeta.
func (pr *PipelineRun) GetName() string {
	return pr.ObjectMeta.Name
}
// GetStatusCondition returns the run's status as a ConditionAccessor.
func (pr *PipelineRun) GetStatusCondition() apis.ConditionAccessor {
	var accessor apis.ConditionAccessor = &pr.Status
	return accessor
}
// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*PipelineRun) GetGroupVersionKind() schema.GroupVersionKind {
	kind := pipeline.PipelineRunControllerName
	return SchemeGroupVersion.WithKind(kind)
}
// IsDone returns true if the PipelineRun's status indicates that it is done.
func (pr *PipelineRun) IsDone() bool {
	succeeded := pr.Status.GetCondition(apis.ConditionSucceeded)
	return !succeeded.IsUnknown()
}
// HasStarted reports whether the PipelineRun has a valid start time in its status.
func (pr *PipelineRun) HasStarted() bool {
	start := pr.Status.StartTime
	return start != nil && !start.IsZero()
}
// IsCancelled returns true if the PipelineRun's spec status is set to Cancelled state
func (pr *PipelineRun) IsCancelled() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusCancelled
}
// IsGracefullyCancelled returns true if the PipelineRun's spec status is set to CancelledRunFinally state
func (pr *PipelineRun) IsGracefullyCancelled() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusCancelledRunFinally
}
// IsGracefullyStopped returns true if the PipelineRun's spec status is set to StoppedRunFinally state
func (pr *PipelineRun) IsGracefullyStopped() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusStoppedRunFinally
}
// PipelineTimeout returns the applicable timeout for the PipelineRun: the
// deprecated spec.Timeout first, then spec.Timeouts.Pipeline, then the
// cluster-wide default.
func (pr *PipelineRun) PipelineTimeout(ctx context.Context) time.Duration {
	switch {
	case pr.Spec.Timeout != nil:
		return pr.Spec.Timeout.Duration
	case pr.Spec.Timeouts != nil && pr.Spec.Timeouts.Pipeline != nil:
		return pr.Spec.Timeouts.Pipeline.Duration
	default:
		return time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute
	}
}
// TasksTimeout returns the tasks timeout for the PipelineRun, if set,
// or the tasks timeout computed as Pipeline minus Finally, if both are set and
// neither disables timeouts. Returns nil otherwise.
func (pr *PipelineRun) TasksTimeout() *metav1.Duration {
	t := pr.Spec.Timeouts
	switch {
	case t == nil:
		return nil
	case t.Tasks != nil:
		return t.Tasks
	case t.Pipeline == nil || t.Finally == nil:
		return nil
	case t.Pipeline.Duration == apisconfig.NoTimeoutDuration || t.Finally.Duration == apisconfig.NoTimeoutDuration:
		return nil
	default:
		return &metav1.Duration{Duration: t.Pipeline.Duration - t.Finally.Duration}
	}
}
// FinallyTimeout returns the finally timeout for the PipelineRun, if set,
// or the finally timeout computed as Pipeline minus Tasks, if both are set and
// neither disables timeouts. Returns nil otherwise.
func (pr *PipelineRun) FinallyTimeout() *metav1.Duration {
	t := pr.Spec.Timeouts
	switch {
	case t == nil:
		return nil
	case t.Finally != nil:
		return t.Finally
	case t.Pipeline == nil || t.Tasks == nil:
		return nil
	case t.Pipeline.Duration == apisconfig.NoTimeoutDuration || t.Tasks.Duration == apisconfig.NoTimeoutDuration:
		return nil
	default:
		return &metav1.Duration{Duration: t.Pipeline.Duration - t.Tasks.Duration}
	}
}
// IsPending returns true if the PipelineRun's spec status is set to Pending state
func (pr *PipelineRun) IsPending() bool {
	status := pr.Spec.Status
	return status == PipelineRunSpecStatusPending
}
// GetNamespacedName returns a k8s namespaced name that identifies this PipelineRun
func (pr *PipelineRun) GetNamespacedName() types.NamespacedName {
	return types.NamespacedName{
		Namespace: pr.Namespace,
		Name:      pr.Name,
	}
}
// IsTimeoutConditionSet returns true when the PipelineRun's Succeeded condition
// is false with the timed-out reason.
func (pr *PipelineRun) IsTimeoutConditionSet() bool {
	cond := pr.Status.GetCondition(apis.ConditionSucceeded)
	if !cond.IsFalse() {
		return false
	}
	return cond.Reason == PipelineRunReasonTimedOut.String()
}
// SetTimeoutCondition sets the status of the PipelineRun to timed out.
func (pr *PipelineRun) SetTimeoutCondition(ctx context.Context) {
	message := fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.PipelineTimeout(ctx).String())
	pr.Status.SetCondition(&apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionFalse,
		Reason:  PipelineRunReasonTimedOut.String(),
		Message: message,
	})
}
// HasTimedOut returns true if the PipelineRun has exceeded its applicable
// timeout based on status.StartTime. Runs that have not started, or whose
// timeout is the "no timeout" sentinel, never time out.
func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	startTime := pr.Status.StartTime
	if startTime.IsZero() {
		return false
	}
	timeout := pr.PipelineTimeout(ctx)
	if timeout == config.NoTimeoutDuration {
		return false
	}
	return c.Since(startTime.Time) > timeout
}
// HasTimedOutForALongTime returns true if a pipelinerun has exceeded its
// spec.Timeout based on its status.StartTime by a large margin.
func (pr *PipelineRun) HasTimedOutForALongTime(ctx context.Context, c clock.PassiveClock) bool {
	if !pr.HasTimedOut(ctx, c) {
		return false
	}
	elapsed := c.Since(pr.Status.StartTime.Time)
	// "A long time" is arbitrarily defined as twice the configured timeout.
	return elapsed >= 2*pr.PipelineTimeout(ctx)
}
// HaveTasksTimedOut returns true if the PipelineRun has exceeded its
// spec.Timeouts.Tasks based on status.StartTime.
func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	startTime := pr.Status.StartTime
	timeout := pr.TasksTimeout()
	if startTime.IsZero() || timeout == nil || timeout.Duration == config.NoTimeoutDuration {
		return false
	}
	return c.Since(startTime.Time) > timeout.Duration
}
// HasFinallyTimedOut returns true if the PipelineRun has exceeded its
// spec.Timeouts.Finally based on status.FinallyStartTime.
func (pr *PipelineRun) HasFinallyTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	startTime := pr.Status.FinallyStartTime
	timeout := pr.FinallyTimeout()
	if startTime == nil || startTime.IsZero() || timeout == nil || timeout.Duration == config.NoTimeoutDuration {
		return false
	}
	return c.Since(startTime.Time) > timeout.Duration
}
// HasVolumeClaimTemplate returns true if any workspace binding on the
// PipelineRun declares a volumeClaimTemplate (used to create a per-run PVC
// with an OwnerReference).
func (pr *PipelineRun) HasVolumeClaimTemplate() bool {
	for i := range pr.Spec.Workspaces {
		if pr.Spec.Workspaces[i].VolumeClaimTemplate != nil {
			return true
		}
	}
	return false
}
// PipelineRunSpec defines the desired state of PipelineRun
type PipelineRunSpec struct {
	// PipelineRef is a reference to the Pipeline to run.
	// +optional
	PipelineRef *PipelineRef `json:"pipelineRef,omitempty"`
	// Specifying PipelineSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Pipeline.spec (API version: tekton.dev/v1beta1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`
	// Resources is a list of bindings specifying which actual instances of
	// PipelineResources to use for the resources the Pipeline has declared
	// it needs.
	//
	// Deprecated: Unused, preserved only for backwards compatibility
	// +listType=atomic
	Resources []PipelineResourceBinding `json:"resources,omitempty"`
	// Params is a list of parameter names and values.
	Params Params `json:"params,omitempty"`
	// ServiceAccountName is the name of the service account that TaskRuns
	// created by this PipelineRun run as.
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty"`
	// Used for cancelling a pipelinerun (and maybe more later on)
	// +optional
	Status PipelineRunSpecStatus `json:"status,omitempty"`
	// Time after which the Pipeline times out.
	// Currently three keys are accepted in the map
	// pipeline, tasks and finally
	// with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally
	// +optional
	Timeouts *TimeoutFields `json:"timeouts,omitempty"`
	// Timeout is the Time after which the Pipeline times out.
	// Defaults to never.
	// Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	//
	// Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead
	//
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
	// PodTemplate holds pod specific configuration
	PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"`
	// Workspaces holds a set of workspace bindings that must match names
	// with those declared in the pipeline.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceBinding `json:"workspaces,omitempty"`
	// TaskRunSpecs holds a set of runtime specs
	// +optional
	// +listType=atomic
	TaskRunSpecs []PipelineTaskRunSpec `json:"taskRunSpecs,omitempty"`
	// ManagedBy indicates which controller is responsible for reconciling
	// this resource. If unset or set to "tekton.dev/pipeline", the default
	// Tekton controller will manage this resource.
	// This field is immutable.
	// +optional
	ManagedBy *string `json:"managedBy,omitempty"`
}
// TimeoutFields allows granular specification of pipeline, task, and finally timeouts
// A nil field means no explicit value was requested for that scope; the
// validation and timeout helpers treat absent fields as "use the default".
type TimeoutFields struct {
	// Pipeline sets the maximum allowed duration for execution of the entire pipeline. The sum of individual timeouts for tasks and finally must not exceed this value.
	Pipeline *metav1.Duration `json:"pipeline,omitempty"`
	// Tasks sets the maximum allowed duration of this pipeline's tasks
	Tasks *metav1.Duration `json:"tasks,omitempty"`
	// Finally sets the maximum allowed duration of this pipeline's finally
	Finally *metav1.Duration `json:"finally,omitempty"`
}
// PipelineRunSpecStatus defines the pipelinerun spec status the user can provide
// Accepted values are the constants below; anything else is rejected by
// validateSpecStatus.
type PipelineRunSpecStatus string

const (
	// PipelineRunSpecStatusCancelled indicates that the user wants to cancel the task,
	// if not already cancelled or terminated
	PipelineRunSpecStatusCancelled = "Cancelled"
	// PipelineRunSpecStatusCancelledRunFinally indicates that the user wants to cancel the pipeline run,
	// if not already cancelled or terminated, but ensure finally is run normally
	PipelineRunSpecStatusCancelledRunFinally = "CancelledRunFinally"
	// PipelineRunSpecStatusStoppedRunFinally indicates that the user wants to stop the pipeline run,
	// wait for already running tasks to be completed and run finally
	// if not already cancelled or terminated
	PipelineRunSpecStatusStoppedRunFinally = "StoppedRunFinally"
	// PipelineRunSpecStatusPending indicates that the user wants to postpone starting a PipelineRun
	// until some condition is met
	PipelineRunSpecStatusPending = "PipelineRunPending"
)
// PipelineRunStatus defines the observed state of PipelineRun
type PipelineRunStatus struct {
	// Status holds the Knative duck-typed conditions (including "Succeeded").
	duckv1.Status `json:",inline"`
	// PipelineRunStatusFields inlines the status fields.
	PipelineRunStatusFields `json:",inline"`
}
// PipelineRunReason represents a reason for the pipeline run "Succeeded" condition
type PipelineRunReason string

const (
	// PipelineRunReasonStarted is the reason set when the PipelineRun has just started
	PipelineRunReasonStarted PipelineRunReason = "Started"
	// PipelineRunReasonRunning is the reason set when the PipelineRun is running
	PipelineRunReasonRunning PipelineRunReason = "Running"
	// PipelineRunReasonSuccessful is the reason set when the PipelineRun completed successfully
	PipelineRunReasonSuccessful PipelineRunReason = "Succeeded"
	// PipelineRunReasonCompleted is the reason set when the PipelineRun completed successfully with one or more skipped Tasks
	PipelineRunReasonCompleted PipelineRunReason = "Completed"
	// PipelineRunReasonFailed is the reason set when the PipelineRun completed with a failure
	PipelineRunReasonFailed PipelineRunReason = "Failed"
	// PipelineRunReasonCancelled is the reason set when the PipelineRun cancelled by the user
	// This reason may be found with a corev1.ConditionFalse status, if the cancellation was processed successfully
	// This reason may be found with a corev1.ConditionUnknown status, if the cancellation is being processed or failed
	PipelineRunReasonCancelled PipelineRunReason = "Cancelled"
	// PipelineRunReasonPending is the reason set when the PipelineRun is in the pending state
	PipelineRunReasonPending PipelineRunReason = "PipelineRunPending"
	// PipelineRunReasonTimedOut is the reason set when the PipelineRun has timed out
	PipelineRunReasonTimedOut PipelineRunReason = "PipelineRunTimeout"
	// PipelineRunReasonStopping indicates that no new Tasks will be scheduled by the controller, and the
	// pipeline will stop once all running tasks complete their work
	PipelineRunReasonStopping PipelineRunReason = "PipelineRunStopping"
	// PipelineRunReasonCancelledRunningFinally indicates that pipeline has been gracefully cancelled
	// and no new Tasks will be scheduled by the controller, but final tasks are now running
	PipelineRunReasonCancelledRunningFinally PipelineRunReason = "CancelledRunningFinally"
	// PipelineRunReasonStoppedRunningFinally indicates that pipeline has been gracefully stopped
	// and no new Tasks will be scheduled by the controller, but final tasks are now running
	PipelineRunReasonStoppedRunningFinally PipelineRunReason = "StoppedRunningFinally"
)

// String returns the string form of the reason, as stored in conditions.
func (t PipelineRunReason) String() string {
	return string(t)
}
// pipelineRunCondSet manages the batch "Succeeded" condition for PipelineRun statuses.
var pipelineRunCondSet = apis.NewBatchConditionSet()

// GetCondition returns the Condition matching the given type.
func (pr *PipelineRunStatus) GetCondition(t apis.ConditionType) *apis.Condition {
	return pipelineRunCondSet.Manage(pr).GetCondition(t)
}
// InitializeConditions will set all conditions in pipelineRunCondSet to unknown for the PipelineRun
// and set the started time to the current time
func (pr *PipelineRunStatus) InitializeConditions(c clock.PassiveClock) {
	justStarted := pr.StartTime.IsZero()
	if justStarted {
		pr.StartTime = &metav1.Time{Time: c.Now()}
	}
	mgr := pipelineRunCondSet.Manage(pr)
	mgr.InitializeConditions()
	// On first initialization, stamp the "Succeeded" condition with the
	// Started reason so observers can tell the run has just begun.
	if justStarted {
		cond := mgr.GetCondition(apis.ConditionSucceeded)
		cond.Reason = PipelineRunReasonStarted.String()
		mgr.SetCondition(*cond)
	}
}
// SetCondition sets the condition, unsetting previous conditions with the same
// type as necessary.
func (pr *PipelineRunStatus) SetCondition(newCond *apis.Condition) {
	if newCond == nil {
		return
	}
	pipelineRunCondSet.Manage(pr).SetCondition(*newCond)
}
// MarkSucceeded changes the Succeeded condition to True with the provided reason and message.
func (pr *PipelineRunStatus) MarkSucceeded(reason, messageFormat string, messageA ...interface{}) {
	pipelineRunCondSet.Manage(pr).MarkTrueWithReason(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	// CompletionTime mirrors the transition time of the terminal condition.
	succeeded := pr.GetCondition(apis.ConditionSucceeded)
	pr.CompletionTime = &succeeded.LastTransitionTime.Inner
}

// MarkFailed changes the Succeeded condition to False with the provided reason and message.
func (pr *PipelineRunStatus) MarkFailed(reason, messageFormat string, messageA ...interface{}) {
	pipelineRunCondSet.Manage(pr).MarkFalse(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	// CompletionTime mirrors the transition time of the terminal condition.
	succeeded := pr.GetCondition(apis.ConditionSucceeded)
	pr.CompletionTime = &succeeded.LastTransitionTime.Inner
}

// MarkRunning changes the Succeeded condition to Unknown with the provided reason and message.
func (pr *PipelineRunStatus) MarkRunning(reason, messageFormat string, messageA ...interface{}) {
	pipelineRunCondSet.Manage(pr).MarkUnknown(apis.ConditionSucceeded, reason, messageFormat, messageA...)
}
// ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.
type ChildStatusReference struct {
	// TypeMeta carries the API version and kind of the referenced child resource.
	runtime.TypeMeta `json:",inline"`
	// Name is the name of the TaskRun or Run this is referencing.
	Name string `json:"name,omitempty"`
	// DisplayName is a user-facing name of the pipelineTask that may be
	// used to populate a UI.
	DisplayName string `json:"displayName,omitempty"`
	// PipelineTaskName is the name of the PipelineTask this is referencing.
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}
// PipelineRunStatusFields holds the fields of PipelineRunStatus' status.
// This is defined separately and inlined so that other types can readily
// consume these fields via duck typing.
type PipelineRunStatusFields struct {
	// StartTime is the time the PipelineRun is actually started.
	StartTime *metav1.Time `json:"startTime,omitempty"`
	// CompletionTime is the time the PipelineRun completed.
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
	// TaskRuns is a map of PipelineRunTaskRunStatus with the taskRun name as the key.
	//
	// Deprecated: use ChildReferences instead. As of v0.45.0, this field is no
	// longer populated and is only included for backwards compatibility with
	// older server versions.
	// +optional
	TaskRuns map[string]*PipelineRunTaskRunStatus `json:"taskRuns,omitempty"`
	// Runs is a map of PipelineRunRunStatus with the run name as the key
	//
	// Deprecated: use ChildReferences instead. As of v0.45.0, this field is no
	// longer populated and is only included for backwards compatibility with
	// older server versions.
	// +optional
	Runs map[string]*PipelineRunRunStatus `json:"runs,omitempty"`
	// PipelineResults are the list of results written out by the pipeline task's containers
	// +optional
	// +listType=atomic
	PipelineResults []PipelineRunResult `json:"pipelineResults,omitempty"`
	// PipelineSpec contains the exact spec used to instantiate the run.
	// See Pipeline.spec (API version: tekton.dev/v1beta1)
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`
	// list of tasks that were skipped due to when expressions evaluating to false
	// +optional
	// +listType=atomic
	SkippedTasks []SkippedTask `json:"skippedTasks,omitempty"`
	// list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.
	// +optional
	// +listType=atomic
	ChildReferences []ChildStatusReference `json:"childReferences,omitempty"`
	// FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.
	// +optional
	FinallyStartTime *metav1.Time `json:"finallyStartTime,omitempty"`
	// Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).
	// +optional
	Provenance *Provenance `json:"provenance,omitempty"`
	// SpanContext contains tracing span context fields
	// NOTE(review): unlike its neighbours this field carries no +optional marker — confirm whether that is intentional.
	SpanContext map[string]string `json:"spanContext,omitempty"`
}
// SkippedTask is used to describe the Tasks that were skipped due to their When Expressions
// evaluating to False. This is a struct because we are looking into including more details
// about the When Expressions that caused this Task to be skipped.
type SkippedTask struct {
	// Name is the Pipeline Task name
	Name string `json:"name"`
	// Reason is the cause of the PipelineTask being skipped.
	Reason SkippingReason `json:"reason"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}

// SkippingReason explains why a PipelineTask was skipped.
type SkippingReason string

// The skipping reasons below are surfaced in status.skippedTasks[].reason.
const (
	// WhenExpressionsSkip means the task was skipped due to at least one of its when expressions evaluating to false
	WhenExpressionsSkip SkippingReason = "When Expressions evaluated to false"
	// ParentTasksSkip means the task was skipped because its parent was skipped
	ParentTasksSkip SkippingReason = "Parent Tasks were skipped"
	// StoppingSkip means the task was skipped because the pipeline run is stopping
	StoppingSkip SkippingReason = "PipelineRun was stopping"
	// GracefullyCancelledSkip means the task was skipped because the pipeline run has been gracefully cancelled
	GracefullyCancelledSkip SkippingReason = "PipelineRun was gracefully cancelled"
	// GracefullyStoppedSkip means the task was skipped because the pipeline run has been gracefully stopped
	GracefullyStoppedSkip SkippingReason = "PipelineRun was gracefully stopped"
	// MissingResultsSkip means the task was skipped because it's missing necessary results
	MissingResultsSkip SkippingReason = "Results were missing"
	// PipelineTimedOutSkip means the task was skipped because the PipelineRun has passed its overall timeout.
	PipelineTimedOutSkip SkippingReason = "PipelineRun timeout has been reached"
	// TasksTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Tasks.
	TasksTimedOutSkip SkippingReason = "PipelineRun Tasks timeout has been reached"
	// FinallyTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Finally.
	FinallyTimedOutSkip SkippingReason = "PipelineRun Finally timeout has been reached"
	// EmptyArrayInMatrixParams means the task was skipped because Matrix parameters contain empty array.
	EmptyArrayInMatrixParams SkippingReason = "Matrix Parameters have an empty array"
	// None means the task was not skipped
	None SkippingReason = "None"
)
// PipelineRunResult used to describe the results of a pipeline
type PipelineRunResult struct {
	// Name is the result's name as declared by the Pipeline
	Name string `json:"name"`
	// Value is the result returned from the execution of this PipelineRun
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ResultValue `json:"value"`
}
// PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status
type PipelineRunTaskRunStatus struct {
	// PipelineTaskName is the name of the PipelineTask.
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// Status is the TaskRunStatus for the corresponding TaskRun
	// +optional
	Status *TaskRunStatus `json:"status,omitempty"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}

// PipelineRunRunStatus contains the name of the PipelineTask for this CustomRun or Run and the CustomRun or Run's Status
type PipelineRunRunStatus struct {
	// PipelineTaskName is the name of the PipelineTask.
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// Status is the CustomRunStatus for the corresponding CustomRun or Run
	// +optional
	Status *CustomRunStatus `json:"status,omitempty"`
	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
	// +optional
	// +listType=atomic
	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PipelineRunList contains a list of PipelineRun
type PipelineRunList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the PipelineRuns in this list response.
	Items []PipelineRun `json:"items,omitempty"`
}

// PipelineTaskRun reports the results of running a step in the Task. Each
// task has the potential to succeed or fail (based on the exit code)
// and produces logs.
type PipelineTaskRun struct {
	// Name is the name of the underlying run.
	Name string `json:"name,omitempty"`
}
// PipelineTaskRunSpec can be used to configure specific
// specs for a concrete Task
type PipelineTaskRunSpec struct {
	// PipelineTaskName selects the PipelineTask this runtime spec applies to
	// (matched by GetTaskRunSpec).
	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
	// TaskServiceAccountName overrides the PipelineRun-level service account
	// for this task when non-empty.
	TaskServiceAccountName string `json:"taskServiceAccountName,omitempty"`
	// TaskPodTemplate is merged over the PipelineRun-level pod template,
	// with this value taking precedence.
	TaskPodTemplate *pod.PodTemplate `json:"taskPodTemplate,omitempty"`
	// StepOverrides overrides individual step resources (beta field).
	// +listType=atomic
	StepOverrides []TaskRunStepOverride `json:"stepOverrides,omitempty"`
	// SidecarOverrides overrides individual sidecar resources (beta field).
	// +listType=atomic
	SidecarOverrides []TaskRunSidecarOverride `json:"sidecarOverrides,omitempty"`
	// +optional
	Metadata *PipelineTaskMetadata `json:"metadata,omitempty"`
	// Compute resources to use for this TaskRun
	ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"`
	// Duration after which the TaskRun times out.
	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
}
// GetTaskRunSpec returns the task specific spec for a given
// PipelineTask if configured, otherwise it returns the PipelineRun's default.
func (pr *PipelineRun) GetTaskRunSpec(pipelineTaskName string) PipelineTaskRunSpec {
	// Seed the result with the PipelineRun-wide defaults.
	result := PipelineTaskRunSpec{
		PipelineTaskName:       pipelineTaskName,
		TaskServiceAccountName: pr.Spec.ServiceAccountName,
		TaskPodTemplate:        pr.Spec.PodTemplate,
	}
	for i := range pr.Spec.TaskRunSpecs {
		trs := pr.Spec.TaskRunSpecs[i]
		if trs.PipelineTaskName != pipelineTaskName {
			continue
		}
		// merge podTemplates specified in pipelineRun.spec.taskRunSpecs[].podTemplate and pipelineRun.spec.podTemplate
		// with taskRunSpecs taking higher precedence
		result.TaskPodTemplate = pod.MergePodTemplateWithDefault(trs.TaskPodTemplate, result.TaskPodTemplate)
		if trs.TaskServiceAccountName != "" {
			result.TaskServiceAccountName = trs.TaskServiceAccountName
		}
		result.StepOverrides = trs.StepOverrides
		result.SidecarOverrides = trs.SidecarOverrides
		result.Metadata = trs.Metadata
		result.ComputeResources = trs.ComputeResources
		result.Timeout = trs.Timeout
	}
	return result
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"strings"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
var (
	// Compile-time interface checks: *PipelineRun must be validatable and
	// must limit the webhook verbs it handles (see SupportedVerbs).
	_ apis.Validatable              = (*PipelineRun)(nil)
	_ resourcesemantics.VerbLimited = (*PipelineRun)(nil)
)
// SupportedVerbs returns the operations that validation should be called for
func (pr *PipelineRun) SupportedVerbs() []admissionregistrationv1.OperationType {
	// Only create and update are validated; deletes bypass validation.
	verbs := make([]admissionregistrationv1.OperationType, 0, 2)
	verbs = append(verbs, admissionregistrationv1.Create, admissionregistrationv1.Update)
	return verbs
}
// Validate pipelinerun
func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError {
	// Deletions need no validation.
	if apis.IsInDelete(ctx) {
		return nil
	}
	errs := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata")
	// A run that has already started can no longer be marked pending.
	if pr.IsPending() && pr.HasStarted() {
		errs = errs.Also(apis.ErrInvalidValue("PipelineRun cannot be Pending after it is started", "spec.status"))
	}
	errs = errs.Also(pr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
	return errs
}
// Validate pipelinerun spec
// It checks update immutability, the pipelineRef/pipelineSpec one-of,
// parameters, workspaces, timeouts, spec status, taskRunSpecs and the pod
// template, accumulating all errors into a single FieldError.
func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Validate the spec changes
	errs = errs.Also(ps.ValidateUpdate(ctx))
	// Must have exactly one of pipelineRef and pipelineSpec.
	if ps.PipelineRef == nil && ps.PipelineSpec == nil {
		errs = errs.Also(apis.ErrMissingOneOf("pipelineRef", "pipelineSpec"))
	}
	if ps.PipelineRef != nil && ps.PipelineSpec != nil {
		errs = errs.Also(apis.ErrMultipleOneOf("pipelineRef", "pipelineSpec"))
	}
	// Validate PipelineRef if it's present
	if ps.PipelineRef != nil {
		errs = errs.Also(ps.PipelineRef.Validate(ctx).ViaField("pipelineRef"))
	}
	// Validate PipelineSpec if it's present
	if ps.PipelineSpec != nil {
		// disable-inline-spec is a comma-separated list of kinds for which
		// inline specs are forbidden; "pipelinerun" disables them here.
		if slices.Contains(strings.Split(
			config.FromContextOrDefaults(ctx).FeatureFlags.DisableInlineSpec, ","), "pipelinerun") {
			errs = errs.Also(apis.ErrDisallowedFields("pipelineSpec"))
		}
		errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec"))
	}
	// Validate PipelineRun parameters
	errs = errs.Also(ps.validatePipelineRunParameters(ctx))
	// Validate propagated parameters
	errs = errs.Also(ps.validateInlineParameters(ctx))
	// Validate propagated workspaces
	errs = errs.Also(ps.validatePropagatedWorkspaces(ctx))
	if ps.Timeout != nil {
		// timeout should be a valid duration of at least 0.
		if ps.Timeout.Duration < 0 {
			errs = errs.Also(apis.ErrInvalidValue(ps.Timeout.Duration.String()+" should be >= 0", "timeout"))
		}
	}
	if ps.Timeouts != nil {
		if ps.Timeout != nil {
			// can't have both at the same time
			errs = errs.Also(apis.ErrDisallowedFields("timeout", "timeouts"))
		}
		// tasks timeout should be a valid duration of at least 0.
		errs = errs.Also(validateTimeoutDuration("tasks", ps.Timeouts.Tasks))
		// finally timeout should be a valid duration of at least 0.
		errs = errs.Also(validateTimeoutDuration("finally", ps.Timeouts.Finally))
		// pipeline timeout should be a valid duration of at least 0.
		errs = errs.Also(validateTimeoutDuration("pipeline", ps.Timeouts.Pipeline))
		if ps.Timeouts.Pipeline != nil {
			errs = errs.Also(ps.validatePipelineTimeout(ps.Timeouts.Pipeline.Duration, "should be <= pipeline duration"))
		} else {
			// NOTE(review): DefaultTimeoutMinutes is cast directly to
			// time.Duration (nanoseconds) without `* time.Minute`, unlike the
			// equivalent conversion in validateTaskRunSpecTimeout — confirm
			// whether this is intentional.
			defaultTimeout := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes)
			errs = errs.Also(ps.validatePipelineTimeout(defaultTimeout, "should be <= default timeout duration"))
		}
	}
	errs = errs.Also(validateSpecStatus(ps.Status))
	if ps.Workspaces != nil {
		// Each workspace binding must be valid and each name may appear only once.
		wsNames := make(map[string]int)
		for idx, ws := range ps.Workspaces {
			errs = errs.Also(ws.Validate(ctx).ViaFieldIndex("workspaces", idx))
			if prevIdx, alreadyExists := wsNames[ws.Name]; alreadyExists {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), "name").ViaFieldIndex("workspaces", idx))
			}
			wsNames[ws.Name] = idx
		}
	}
	for idx, trs := range ps.TaskRunSpecs {
		errs = errs.Also(validateTaskRunSpec(ctx, trs, ps.Timeouts).ViaIndex(idx).ViaField("taskRunSpecs"))
	}
	if ps.PodTemplate != nil {
		errs = errs.Also(validatePodTemplateEnv(ctx, *ps.PodTemplate))
	}
	if ps.Resources != nil {
		// The deprecated resources field is no longer accepted.
		errs = errs.Also(apis.ErrDisallowedFields("resources"))
	}
	return errs
}
// ValidateUpdate validates the update of a PipelineRunSpec
// It enforces that managedBy is immutable, that a completed run's spec cannot
// change (modulo re-applied defaults), and that a started run only accepts
// status changes.
func (ps *PipelineRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
	// Only meaningful in the webhook update path.
	if !apis.IsInUpdate(ctx) {
		return
	}
	oldObj, ok := apis.GetBaseline(ctx).(*PipelineRun)
	if !ok || oldObj == nil {
		return
	}
	// managedBy is immutable: reject nil<->non-nil flips and value changes.
	// (The dereference is safe: it is only evaluated when both sides are non-nil.)
	if (oldObj.Spec.ManagedBy == nil) != (ps.ManagedBy == nil) || (oldObj.Spec.ManagedBy != nil && *oldObj.Spec.ManagedBy != *ps.ManagedBy) {
		errs = errs.Also(apis.ErrInvalidValue("managedBy is immutable", "spec.managedBy"))
	}
	if oldObj.IsDone() {
		// try comparing without any copying first
		// this handles the common case where only finalizers changed
		if equality.Semantic.DeepEqual(&oldObj.Spec, ps) {
			return nil // Specs identical, allow update
		}
		// Specs differ, this could be due to different defaults after upgrade
		// Apply current defaults to old spec to normalize
		oldCopy := oldObj.Spec.DeepCopy()
		oldCopy.SetDefaults(ctx)
		if equality.Semantic.DeepEqual(oldCopy, ps) {
			return nil // Difference was only defaults, allow update
		}
		// Real spec changes detected, reject update
		errs = errs.Also(apis.ErrInvalidValue("Once the PipelineRun is complete, no updates are allowed", ""))
		return errs
	}
	// Handle started but not done case
	// Copy the old spec and graft the fields that ARE allowed to change
	// (status, and managedBy which was validated above) so any remaining
	// difference is an illegal edit.
	old := oldObj.Spec.DeepCopy()
	old.Status = ps.Status
	old.ManagedBy = ps.ManagedBy // Already tested before
	if !equality.Semantic.DeepEqual(old, ps) {
		errs = errs.Also(apis.ErrInvalidValue("Once the PipelineRun has started, only status updates are allowed", ""))
	}
	return
}
// validatePipelineRunParameters checks the run's parameters for type and
// uniqueness errors, and rejects task-result references in parameter values.
func (ps *PipelineRunSpec) validatePipelineRunParameters(ctx context.Context) (errs *apis.FieldError) {
	if len(ps.Params) == 0 {
		return errs
	}
	// Validate parameter types and uniqueness
	errs = errs.Also(ValidateParameters(ctx, ps.Params).ViaField("params"))
	// Task results cannot appear in PipelineRun parameter values.
	for _, p := range ps.Params {
		exprs, found := GetVarSubstitutionExpressionsForParam(p)
		if !found || !LooksLikeContainsResultRefs(exprs) {
			continue
		}
		exprs = filter(exprs, resultref.LooksLikeResultRef)
		if len(NewResultRefs(exprs)) > 0 {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("cannot use result expressions in %v as PipelineRun parameter values", exprs),
				"value").ViaFieldKey("params", p.Name))
		}
	}
	return errs
}
// validatePropagatedWorkspaces validates workspaces that are propagated.
func (ps *PipelineRunSpec) validatePropagatedWorkspaces(ctx context.Context) (errs *apis.FieldError) {
	if ps.PipelineSpec == nil {
		return errs
	}
	// Collect every workspace name bound by the run or declared by the pipeline.
	seen := sets.NewString()
	for _, binding := range ps.Workspaces {
		seen.Insert(binding.Name)
	}
	for _, decl := range ps.PipelineSpec.Workspaces {
		seen.Insert(decl.Name)
	}
	// Task-level workspace names accumulate as we walk tasks, then finally tasks.
	for idx, task := range ps.PipelineSpec.Tasks {
		for _, w := range task.Workspaces {
			seen.Insert(w.Name)
		}
		errs = errs.Also(task.validateWorkspaces(seen).ViaIndex(idx))
	}
	for idx, task := range ps.PipelineSpec.Finally {
		for _, w := range task.Workspaces {
			seen.Insert(w.Name)
		}
		errs = errs.Also(task.validateWorkspaces(seen).ViaIndex(idx))
	}
	return errs
}
// validateInlineParameters validates parameters that are defined inline.
// This is crucial for propagated parameters since the parameters could
// be defined under pipelineRun and then called directly in the task steps.
// In this case, parameters cannot be validated by the underlying pipelineSpec
// or taskSpec since they may not have the parameters declared because of propagation.
func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs *apis.FieldError) {
	if ps.PipelineSpec == nil {
		return errs
	}
	// Build a merged view of every parameter declaration visible to the run:
	// run-level params, pipeline-level declarations, and inline task specs.
	paramSpecForValidation := make(map[string]ParamSpec)
	for _, p := range ps.Params {
		paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation)
	}
	for _, p := range ps.PipelineSpec.Params {
		var err *apis.FieldError
		paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation)
		if err != nil {
			errs = errs.Also(err)
		}
	}
	for _, pt := range ps.PipelineSpec.Tasks {
		paramSpecForValidation = appendPipelineTaskParams(paramSpecForValidation, pt.Params)
		if pt.TaskSpec != nil && pt.TaskSpec.Params != nil {
			for _, p := range pt.TaskSpec.Params {
				var err *apis.FieldError
				paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation)
				if err != nil {
					errs = errs.Also(err)
				}
			}
		}
	}
	// Flatten the merged map so it can be passed to the validators below.
	var paramSpec []ParamSpec
	for _, v := range paramSpecForValidation {
		paramSpec = append(paramSpec, v)
	}
	// NOTE: ps.PipelineSpec != nil is already guaranteed by the early return
	// above; the re-check here is redundant but harmless.
	if ps.PipelineSpec != nil && ps.PipelineSpec.Tasks != nil {
		for _, pt := range ps.PipelineSpec.Tasks {
			if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil {
				errs = errs.Also(ValidateParameterTypes(ctx, paramSpec))
				errs = errs.Also(ValidateParameterVariables(ctx, pt.TaskSpec.Steps, paramSpec))
				errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, pt.TaskSpec.Steps, paramSpec))
			}
		}
		errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.PipelineSpec.Tasks, paramSpec))
		errs = errs.Also(validatePipelineTaskParameterUsage(ps.PipelineSpec.Tasks, paramSpec))
	}
	return errs
}
// appendPipelineTaskParams merges a PipelineTask's params into the map of
// parameter declarations used for inline-parameter validation. Object params
// that already have a declaration get their keys folded into the existing
// spec; unknown params are turned into fresh declarations.
func appendPipelineTaskParams(paramSpecForValidation map[string]ParamSpec, params Params) map[string]ParamSpec {
	for _, p := range params {
		if pSpec, ok := paramSpecForValidation[p.Name]; ok {
			if p.Value.ObjectVal != nil {
				for k, v := range p.Value.ObjectVal {
					// NOTE(review): assumes pSpec.Default and its ObjectVal map
					// (and pSpec.Properties) are non-nil for object params —
					// presumably guaranteed by createParamSpecFromParam /
					// combineParamSpec; a nil map here would panic. Confirm.
					pSpec.Default.ObjectVal[k] = v
					pSpec.Properties[k] = PropertySpec{Type: ParamTypeString}
				}
			}
			paramSpecForValidation[p.Name] = pSpec
		} else {
			paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation)
		}
	}
	return paramSpecForValidation
}
// validateSpecStatus checks that a user-supplied spec.status is empty or one
// of the accepted cancellation/pending values.
func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError {
	switch status {
	case "",
		PipelineRunSpecStatusPending,
		PipelineRunSpecStatusCancelled,
		PipelineRunSpecStatusCancelledRunFinally,
		PipelineRunSpecStatusStoppedRunFinally:
		return nil
	default:
		return apis.ErrInvalidValue(fmt.Sprintf("%s should be %s, %s, %s or %s", status,
			PipelineRunSpecStatusCancelled,
			PipelineRunSpecStatusCancelledRunFinally,
			PipelineRunSpecStatusStoppedRunFinally,
			PipelineRunSpecStatusPending), "status")
	}
}
// validateTimeoutDuration rejects negative durations, reporting the error
// under "timeouts.<field>". A nil duration is treated as unset and valid.
func validateTimeoutDuration(field string, d *metav1.Duration) (errs *apis.FieldError) {
	if d == nil || d.Duration >= 0 {
		return nil
	}
	return errs.Also(apis.ErrInvalidValue(d.Duration.String()+" should be >= 0", "timeouts."+field))
}
// validatePipelineTimeout checks spec.timeouts.tasks and spec.timeouts.finally
// against the given overall pipeline timeout: neither may exceed it, neither
// may be "no timeout" when the pipeline itself has a finite timeout, and
// their sum may not exceed it. errorMsg is appended to each reported value.
// Callers guarantee ps.Timeouts is non-nil.
func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorMsg string) (errs *apis.FieldError) {
	if ps.Timeouts.Tasks != nil {
		tasksTimeoutErr := false
		tasksTimeoutStr := ps.Timeouts.Tasks.Duration.String()
		// tasks timeout must not exceed a finite pipeline timeout...
		if ps.Timeouts.Tasks.Duration > timeout && timeout != config.NoTimeoutDuration {
			tasksTimeoutErr = true
		}
		// ...and "no timeout" for tasks is incompatible with a finite pipeline timeout.
		if ps.Timeouts.Tasks.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration {
			tasksTimeoutErr = true
			tasksTimeoutStr += " (no timeout)"
		}
		if tasksTimeoutErr {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s %s", tasksTimeoutStr, errorMsg), "timeouts.tasks"))
		}
	}
	if ps.Timeouts.Finally != nil {
		finallyTimeoutErr := false
		finallyTimeoutStr := ps.Timeouts.Finally.Duration.String()
		// Same two rules as above, applied to the finally timeout.
		if ps.Timeouts.Finally.Duration > timeout && timeout != config.NoTimeoutDuration {
			finallyTimeoutErr = true
		}
		if ps.Timeouts.Finally.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration {
			finallyTimeoutErr = true
			finallyTimeoutStr += " (no timeout)"
		}
		if finallyTimeoutErr {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s %s", finallyTimeoutStr, errorMsg), "timeouts.finally"))
		}
	}
	// When both are set, their sum must also fit within the pipeline timeout.
	// NOTE(review): unlike the checks above, this sum comparison does not
	// exempt a pipeline timeout equal to config.NoTimeoutDuration — confirm
	// whether that is intentional.
	if ps.Timeouts.Tasks != nil && ps.Timeouts.Finally != nil {
		if ps.Timeouts.Tasks.Duration+ps.Timeouts.Finally.Duration > timeout {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s + %s %s", ps.Timeouts.Tasks.Duration.String(), ps.Timeouts.Finally.Duration.String(), errorMsg), "timeouts.tasks"))
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s + %s %s", ps.Timeouts.Tasks.Duration.String(), ps.Timeouts.Finally.Duration.String(), errorMsg), "timeouts.finally"))
		}
	}
	return errs
}
// validateTaskRunSpec validates one entry of spec.taskRunSpecs: step and
// sidecar overrides (beta-gated), compute resources (beta-gated, and mutually
// constrained with step overrides), the task-level pod template, and the
// per-task timeout against the pipeline's timeouts.
func validateTaskRunSpec(ctx context.Context, trs PipelineTaskRunSpec, pipelineTimeouts *TimeoutFields) (errs *apis.FieldError) {
	if trs.StepOverrides != nil {
		// stepOverrides requires the beta API surface to be enabled.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepOverrides", config.BetaAPIFields).ViaField("stepOverrides"))
		errs = errs.Also(validateStepOverrides(trs.StepOverrides).ViaField("stepOverrides"))
	}
	if trs.SidecarOverrides != nil {
		// sidecarOverrides requires the beta API surface to be enabled.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.BetaAPIFields).ViaField("sidecarOverrides"))
		errs = errs.Also(validateSidecarOverrides(trs.SidecarOverrides).ViaField("sidecarOverrides"))
	}
	if trs.ComputeResources != nil {
		// computeResources requires beta, and cannot conflict with stepOverrides.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "computeResources", config.BetaAPIFields).ViaField("computeResources"))
		errs = errs.Also(validateTaskRunComputeResources(trs.ComputeResources, trs.StepOverrides))
	}
	if trs.TaskPodTemplate != nil {
		errs = errs.Also(validatePodTemplateEnv(ctx, *trs.TaskPodTemplate))
	}
	// Check taskRunSpec timeout against pipeline limits
	errs = errs.Also(validateTaskRunSpecTimeout(ctx, trs.Timeout, pipelineTimeouts))
	return errs
}
// validateTaskRunSpecTimeout validates a TaskRunSpec's timeout against pipeline timeouts.
// This function works in isolation and doesn't rely on previous validation steps.
// A nil timeout is valid (the pipeline defaults apply).
func validateTaskRunSpecTimeout(ctx context.Context, timeout *metav1.Duration, pipelineTimeouts *TimeoutFields) *apis.FieldError {
	if timeout == nil {
		return nil
	}
	cfg := config.FromContextOrDefaults(ctx)
	var errs *apis.FieldError
	// Validate basic timeout (negative values). Keep the validated value so it
	// does not have to be recomputed (with its error discarded) further down.
	taskRunTimeout, err := validateTimeout(timeout, cfg.Defaults.DefaultTimeoutMinutes)
	if err != nil {
		errs = errs.Also(err)
	}
	// Validate timeout against effective pipeline timeout (explicit or default)
	if err == nil {
		// Find applicable timeout limit: Tasks -> Pipeline -> Default (DefaultTimeoutMinutes)
		var maxTimeout *metav1.Duration
		var timeoutSource string
		switch {
		case pipelineTimeouts != nil && pipelineTimeouts.Tasks != nil:
			if validatedTimeout, err := validateTimeout(pipelineTimeouts.Tasks, cfg.Defaults.DefaultTimeoutMinutes); err != nil {
				// Add error if Tasks timeout is invalid (prevents silent failures)
				errs = errs.Also(err)
			} else {
				maxTimeout = validatedTimeout
				timeoutSource = "pipeline tasks duration"
			}
		case pipelineTimeouts != nil && pipelineTimeouts.Pipeline != nil:
			if validatedTimeout, err := validateTimeout(pipelineTimeouts.Pipeline, cfg.Defaults.DefaultTimeoutMinutes); err != nil {
				// Add error if Pipeline timeout is invalid (prevents silent failures)
				errs = errs.Also(err)
			} else {
				maxTimeout = validatedTimeout
				timeoutSource = "pipeline duration"
			}
		default:
			maxTimeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute}
			timeoutSource = "default pipeline duration"
		}
		// Always check against max timeout if it's not "no timeout"
		if maxTimeout != nil && maxTimeout.Duration != config.NoTimeoutDuration {
			if taskRunTimeout.Duration > maxTimeout.Duration {
				errs = errs.Also(apis.ErrInvalidValue(
					fmt.Sprintf("%s should be <= %s %s", taskRunTimeout.Duration, timeoutSource, maxTimeout.Duration),
					"timeout"))
			}
		}
	}
	return errs
}
// validateTimeout normalizes and checks a timeout field. A nil timeout yields
// the configured default; a negative timeout yields a field error; anything
// else is returned unchanged.
func validateTimeout(timeout *metav1.Duration, defaultTimeoutMinutes int) (*metav1.Duration, *apis.FieldError) {
	switch {
	case timeout == nil:
		// Fall back to the configured default when no timeout was supplied.
		defaulted := time.Duration(defaultTimeoutMinutes) * time.Minute
		return &metav1.Duration{Duration: defaulted}, nil
	case timeout.Duration < 0:
		return nil, apis.ErrInvalidValue(timeout.Duration.String()+" should be >= 0", "timeout")
	default:
		return timeout, nil
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo copies this v1beta1 Provenance into the equivalent v1 Provenance.
func (p Provenance) convertTo(ctx context.Context, sink *v1.Provenance) {
	if p.RefSource != nil {
		var ref v1.RefSource
		p.RefSource.convertTo(ctx, &ref)
		sink.RefSource = &ref
	}
	if p.FeatureFlags != nil {
		sink.FeatureFlags = p.FeatureFlags
	}
}
// convertFrom populates this v1beta1 Provenance from a v1 Provenance.
func (p *Provenance) convertFrom(ctx context.Context, source v1.Provenance) {
	if source.RefSource != nil {
		var ref RefSource
		ref.convertFrom(ctx, *source.RefSource)
		p.RefSource = &ref
	}
	if source.FeatureFlags != nil {
		p.FeatureFlags = source.FeatureFlags
	}
}
// convertTo copies every field of this RefSource onto the v1 sink.
func (rs RefSource) convertTo(ctx context.Context, sink *v1.RefSource) {
	sink.EntryPoint = rs.EntryPoint
	sink.Digest = rs.Digest
	sink.URI = rs.URI
}
// convertFrom populates this RefSource from a v1 RefSource.
func (rs *RefSource) convertFrom(ctx context.Context, source v1.RefSource) {
	rs.EntryPoint = source.EntryPoint
	rs.Digest = source.Digest
	rs.URI = source.URI
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is the group version used to register these objects
// with the runtime scheme.
var SchemeGroupVersion = schema.GroupVersion{Group: pipeline.GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and qualifies it with this package's API group.
func Kind(kind string) schema.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// Resource takes an unqualified resource and qualifies it with this package's API group.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme adds the types defined in this package to the given scheme.
	AddToScheme = schemeBuilder.AddToScheme
)
// addKnownTypes registers this package's known API types (and their List
// variants) with the given Scheme under SchemeGroupVersion.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Task{},
		&TaskList{},
		&Pipeline{},
		&PipelineList{},
		&TaskRun{},
		&TaskRunList{},
		&PipelineRun{},
		&PipelineRunList{},
		&CustomRun{},
		&CustomRunList{},
		&StepAction{},
		&StepActionList{},
	)
	// &Condition{},
	// &ConditionList{},
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo copies this ResolverRef into the equivalent v1 ResolverRef.
func (rr ResolverRef) convertTo(ctx context.Context, sink *v1.ResolverRef) {
	sink.Resolver = v1.ResolverName(rr.Resolver)
	// Rebuild the params from scratch so no stale entries are kept.
	sink.Params = nil
	for _, p := range rr.Params {
		var converted v1.Param
		p.convertTo(ctx, &converted)
		sink.Params = append(sink.Params, converted)
	}
}
// convertFrom populates this ResolverRef from a v1 ResolverRef.
func (rr *ResolverRef) convertFrom(ctx context.Context, source v1.ResolverRef) {
	rr.Resolver = ResolverName(source.Resolver)
	// Rebuild the params from scratch so no stale entries are kept.
	rr.Params = nil
	for _, p := range source.Params {
		var converted Param
		converted.ConvertFrom(ctx, p)
		rr.Params = append(rr.Params, converted)
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo copies this v1beta1 TaskResult into the equivalent v1 TaskResult.
func (r TaskResult) convertTo(ctx context.Context, sink *v1.TaskResult) {
	sink.Name = r.Name
	sink.Type = v1.ResultsType(r.Type)
	sink.Description = r.Description
	if r.Properties != nil {
		// Translate each property spec's param type into the v1 equivalent.
		props := make(map[string]v1.PropertySpec, len(r.Properties))
		for name, spec := range r.Properties {
			props[name] = v1.PropertySpec{Type: v1.ParamType(spec.Type)}
		}
		sink.Properties = props
	}
	if r.Value != nil {
		value := &v1.ParamValue{}
		r.Value.convertTo(ctx, value)
		sink.Value = value
	}
}
// convertFrom populates this v1beta1 TaskResult from a v1 TaskResult.
func (r *TaskResult) convertFrom(ctx context.Context, source v1.TaskResult) {
	r.Name = source.Name
	r.Type = ResultsType(source.Type)
	r.Description = source.Description
	if source.Properties != nil {
		// Translate each property spec's param type into the v1beta1 equivalent.
		props := make(map[string]PropertySpec, len(source.Properties))
		for name, spec := range source.Properties {
			props[name] = PropertySpec{Type: ParamType(spec.Type)}
		}
		r.Properties = props
	}
	if source.Value != nil {
		value := &ParamValue{}
		value.convertFrom(ctx, *source.Value)
		r.Value = value
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import "context"
// SetDefaults fills in the default type for a TaskResult and for each of its
// declared object properties. Safe to call on a nil receiver.
func (tr *TaskResult) SetDefaults(context.Context) {
	if tr == nil {
		return
	}
	if tr.Type == "" {
		// An explicit `properties` section implies an object result;
		// everything else defaults to a plain string result.
		if tr.Properties == nil {
			tr.Type = ResultsTypeString
		} else {
			tr.Type = ResultsTypeObject
		}
	}
	// Object property values without an explicit type default to string.
	for key, spec := range tr.Properties {
		if spec.Type == "" {
			tr.Properties[key] = PropertySpec{Type: ParamType(ResultsTypeString)}
		}
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import "strings"
// TaskResult used to describe the results of a task
type TaskResult struct {
	// Name the given name
	Name string `json:"name"`

	// Type is the user-specified type of the result. The possible types are
	// "string", "array", and "object" (see AllResultsTypes); "string" is the
	// default.
	// +optional
	Type ResultsType `json:"type,omitempty"`

	// Properties is the JSON Schema properties to support key-value pairs results.
	// +optional
	Properties map[string]PropertySpec `json:"properties,omitempty"`

	// Description is a human-readable description of the result
	// +optional
	Description string `json:"description,omitempty"`

	// Value the expression used to retrieve the value of the result from an underlying Step.
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value *ResultValue `json:"value,omitempty"`
}

// TaskRunResult used to describe the results of a task
type TaskRunResult struct {
	// Name the given name
	Name string `json:"name"`

	// Type is the user-specified type of the result. The possible types are
	// "string", "array", and "object" (see AllResultsTypes); "string" is the
	// default.
	// +optional
	Type ResultsType `json:"type,omitempty"`

	// Value the given value of the result
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Value ResultValue `json:"value"`
}

// TaskRunStepResult is a type alias of TaskRunResult
type TaskRunStepResult = TaskRunResult

// ResultValue is a type alias of ParamValue
type ResultValue = ParamValue

// ResultsType indicates the type of a result;
// Used to distinguish between a single string and an array of strings.
// Note that there is ResultType used to find out whether a
// RunResult is from a task result or not, which is different from
// this ResultsType.
type ResultsType string

// Valid ResultsType:
const (
	ResultsTypeString ResultsType = "string"
	ResultsTypeArray  ResultsType = "array"
	ResultsTypeObject ResultsType = "object"
)

// AllResultsTypes can be used for ResultsTypes validation.
var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject}
// ResultsArrayReference extracts the dotted reference from a result
// expression, e.g. "results.resultname" from "$(results.resultname[*])".
func ResultsArrayReference(a string) string {
	ref := strings.TrimPrefix(a, "$(")
	ref = strings.TrimSuffix(ref, ")")
	return strings.TrimSuffix(ref, "[*]")
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
// Validate implements apis.Validatable. It checks that the result name matches
// ResultNameFormat, that the declared type is one of the supported result
// types (object results get extra property validation), and finally validates
// the optional Value expression.
func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) {
	if !resultNameFormatRegex.MatchString(tr.Name) {
		return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat))
	}
	switch {
	case tr.Type == ResultsTypeObject:
		errs = errs.Also(validateObjectResult(tr))
	case tr.Type == ResultsTypeArray:
		// Array results need no additional validation here.
	// Resources created before the result. Type was introduced may not have Type set
	// and should be considered valid
	case tr.Type == "":
		// By default, the result type is string
	case tr.Type != ResultsTypeString:
		errs = errs.Also(apis.ErrInvalidValue(tr.Type, "type", "type must be string"))
	}
	return errs.Also(tr.validateValue(ctx))
}
// validateObjectResult checks an object-typed result: Properties must be
// declared, and every declared property must be of type string.
func validateObjectResult(tr TaskResult) (errs *apis.FieldError) {
	if ParamType(tr.Type) == ParamTypeObject && tr.Properties == nil {
		return apis.ErrMissingField(tr.Name + ".properties")
	}
	// Collect every property whose declared type is not string.
	badKeys := []string{}
	for key, spec := range tr.Properties {
		if spec.Type != ParamTypeString {
			badKeys = append(badKeys, key)
		}
	}
	if len(badKeys) == 0 {
		return nil
	}
	return &apis.FieldError{
		Message: fmt.Sprintf("The value type specified for these keys %v is invalid, the type must be string", badKeys),
		Paths:   []string{tr.Name + ".properties"},
	}
}
// validateValue validates the value of the TaskResult.
// It requires the value is of type string
// and format $(steps.<stepName>.results.<resultName>)
func (tr TaskResult) validateValue(ctx context.Context) (errs *apis.FieldError) {
	// Value is optional; nothing to validate when absent.
	if tr.Value == nil {
		return nil
	}
	if tr.Value.Type != ParamTypeString {
		return &apis.FieldError{
			Message: fmt.Sprintf(
				"Invalid Type. Wanted string but got: \"%v\"", tr.Value.Type),
			Paths: []string{
				tr.Name + ".type",
			},
		}
	}
	if tr.Value.StringVal != "" {
		// Pull the step and result names out of $(steps.<stepName>.results.<resultName>).
		stepName, resultName, err := v1.ExtractStepResultName(tr.Value.StringVal)
		if err != nil {
			return &apis.FieldError{
				Message: err.Error(),
				Paths:   []string{tr.Name + ".value"},
			}
		}
		// The extracted step name must be a valid DNS label.
		if e := validation.IsDNS1123Label(stepName); len(e) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: fmt.Sprintf("invalid extracted step name %q", stepName),
				Paths:   []string{tr.Name + ".value"},
				Details: "stepName in $(steps.<stepName>.results.<resultName>) must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
			})
		}
		// The extracted result name must match the same format enforced on result names.
		if !resultNameFormatRegex.MatchString(resultName) {
			errs = errs.Also(&apis.FieldError{
				Message: fmt.Sprintf("invalid extracted result name %q", resultName),
				Paths:   []string{tr.Name + ".value"},
				Details: fmt.Sprintf("resultName in $(steps.<stepName>.results.<resultName>) must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat),
			})
		}
	}
	return errs
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"regexp"
"strings"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
)
// ResultRef is a type that represents a reference to a task run result
type ResultRef struct {
	// PipelineTask is the name of the pipeline task the result comes from.
	PipelineTask string `json:"pipelineTask"`
	// Result is the name of the referenced result.
	Result string `json:"result"`
	// ResultsIndex is the array index when an element of an array result is
	// referenced; nil for non-indexed references.
	ResultsIndex *int `json:"resultsIndex"`
	// Property is the object key when a property of an object result is referenced.
	Property string `json:"property"`
}

const (
	// ResultTaskPart Constant used to define the "tasks" part of a pipeline result reference
	// retained because of backwards compatibility
	ResultTaskPart = resultref.ResultTaskPart
	// ResultFinallyPart Constant used to define the "finally" part of a pipeline result reference
	// retained because of backwards compatibility
	ResultFinallyPart = resultref.ResultFinallyPart
	// ResultResultPart Constant used to define the "results" part of a pipeline result reference
	// retained because of backwards compatibility
	ResultResultPart = resultref.ResultResultPart
	// TODO(#2462) use one regex across all substitutions
	// variableSubstitutionFormat matches format like $result.resultname, $result.resultname[int] and $result.resultname[*]
	variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)`
	// exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else
	// i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not.
	exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$`
	// ResultNameFormat Constant used to define the regex Result.Name should follow
	ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$`
)

// VariableSubstitutionRegex is a regex to find all result matching substitutions
var VariableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat)
var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat)
var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat)
// NewResultRefs extracts all ResultReferences from a param or a pipeline
// result. Expressions that fail to parse as task result references are
// skipped, since they may be some other (valid) kind of reference.
func NewResultRefs(expressions []string) []*ResultRef {
	var refs []*ResultRef
	for _, expr := range expressions {
		parsed, err := resultref.ParseTaskExpression(expr)
		if err != nil {
			// Not a result reference; ignore rather than fail.
			continue
		}
		refs = append(refs, &ResultRef{
			PipelineTask: parsed.ResourceName,
			Result:       parsed.ResultName,
			ResultsIndex: parsed.ArrayIdx,
			Property:     parsed.ObjectKey,
		})
	}
	return refs
}
// LooksLikeContainsResultRefs reports whether any of the given expressions
// look like they reference a result. This is a cheap pre-check before
// performing strict validation on what appear to be result references.
func LooksLikeContainsResultRefs(expressions []string) bool {
	for i := range expressions {
		if resultref.LooksLikeResultRef(expressions[i]) {
			return true
		}
	}
	return false
}
// GetVarSubstitutionExpressionsForParam extracts every "$( ... )" expression
// from a parameter's value. The boolean reports whether any were found; an
// unknown value type yields (nil, false).
func GetVarSubstitutionExpressionsForParam(param Param) ([]string, bool) {
	var exprs []string
	switch param.Value.Type {
	case ParamTypeArray:
		// Scan each element of an array value.
		for _, elem := range param.Value.ArrayVal {
			exprs = append(exprs, validateString(elem)...)
		}
	case ParamTypeString:
		exprs = append(exprs, validateString(param.Value.StringVal)...)
	case ParamTypeObject:
		// Scan each value of an object value (keys are not scanned).
		for _, val := range param.Value.ObjectVal {
			exprs = append(exprs, validateString(val)...)
		}
	default:
		return nil, false
	}
	return exprs, len(exprs) != 0
}
// GetVarSubstitutionExpressionsForPipelineResult extracts every "$( ... )"
// expression from a pipeline result's value (its string, array, and object
// forms). The boolean reports whether any were found.
func GetVarSubstitutionExpressionsForPipelineResult(result PipelineResult) ([]string, bool) {
	exprs := validateString(result.Value.StringVal)
	// Also scan any array elements and object values the result carries.
	for _, elem := range result.Value.ArrayVal {
		exprs = append(exprs, validateString(elem)...)
	}
	for _, val := range result.Value.ObjectVal {
		exprs = append(exprs, validateString(val)...)
	}
	return exprs, len(exprs) != 0
}
// validateString returns the stripped contents of every "$( ... )"
// substitution found in value, or nil when value contains none.
func validateString(value string) []string {
	matches := VariableSubstitutionRegex.FindAllString(value, -1)
	if len(matches) == 0 {
		return nil
	}
	stripped := make([]string, 0, len(matches))
	for _, m := range matches {
		stripped = append(stripped, stripVarSubExpression(m))
	}
	return stripped
}
// stripVarSubExpression removes the leading "$(" and trailing ")" from a
// variable substitution expression, leaving the inner reference.
func stripVarSubExpression(expression string) string {
	inner := strings.TrimPrefix(expression, "$(")
	return strings.TrimSuffix(inner, ")")
}
// ParseResultName parse the input string to extract resultName and result index.
// It is a thin wrapper around resultref.ParseResultName, retained here for
// backwards compatibility.
// Array indexing:
// Input:  anArrayResult[1]
// Output: anArrayResult, "1"
// Array star reference:
// Input:  anArrayResult[*]
// Output: anArrayResult, "*"
func ParseResultName(resultName string) (string, string) {
	return resultref.ParseResultName(resultName)
}
// PipelineTaskResultRefs walks all the places a result reference can be used
// in a PipelineTask (its params and when expressions) and returns a list of
// every reference that is found.
func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef {
	refs := []*ResultRef{}
	for _, param := range pt.extractAllParams() {
		exprs, _ := GetVarSubstitutionExpressionsForParam(param)
		refs = append(refs, NewResultRefs(exprs)...)
	}
	for _, we := range pt.WhenExpressions {
		exprs, _ := we.GetVarSubstitutionExpressions()
		refs = append(refs, NewResultRefs(exprs)...)
	}
	return refs
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"knative.dev/pkg/apis"
)
var _ apis.Convertible = (*StepAction)(nil)

// ConvertTo implements apis.Convertible. It is currently a no-op.
func (s *StepAction) ConvertTo(ctx context.Context, to apis.Convertible) error {
	return nil
}

// ConvertTo converts this StepActionSpec into the given sink.
// NOTE(review): this is a no-op stub, and its signature does not match
// apis.Convertible despite the resemblance — confirm intent.
func (ss *StepActionSpec) ConvertTo(ctx context.Context, sink *StepActionSpec) error {
	return nil
}

// ConvertFrom implements apis.Convertible. It is currently a no-op.
func (s *StepAction) ConvertFrom(ctx context.Context, from apis.Convertible) error {
	return nil
}

// ConvertFrom populates this StepActionSpec from the given source.
// NOTE(review): no-op stub; see ConvertTo above it for the same caveat.
func (ss *StepActionSpec) ConvertFrom(ctx context.Context, source *StepActionSpec) error {
	return nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"knative.dev/pkg/apis"
)
var _ apis.Defaultable = (*StepAction)(nil)

// SetDefaults implements apis.Defaultable by delegating to the spec.
func (s *StepAction) SetDefaults(ctx context.Context) {
	s.Spec.SetDefaults(ctx)
}
// SetDefaults applies defaults to every param and result declared by the
// StepAction spec.
func (ss *StepActionSpec) SetDefaults(ctx context.Context) {
	// Iterate by index so defaults are applied in place.
	for idx := range ss.Params {
		ss.Params[idx].SetDefaults(ctx)
	}
	for idx := range ss.Results {
		ss.Results[idx].SetDefaults(ctx)
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmeta"
)
// +genclient
// +genclient:noStatus
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// StepAction represents the actionable components of Step.
// The Step can only reference it from the cluster or using remote resolution.
//
// +k8s:openapi-gen=true
// +kubebuilder:storageversion
type StepAction struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata"`

	// Spec holds the desired state of the Step from the client
	// +optional
	Spec StepActionSpec `json:"spec"`
}

// Compile-time check that StepAction can serve as an owner reference.
var _ kmeta.OwnerRefable = (*StepAction)(nil)
// StepActionSpec returns the step action's spec.
func (s *StepAction) StepActionSpec() StepActionSpec {
	return s.Spec
}
// StepActionMetadata returns the step action's ObjectMeta.
func (s *StepAction) StepActionMetadata() metav1.ObjectMeta {
	return s.ObjectMeta
}
// Copy returns a deep copy of the stepaction, satisfying StepActionObject.
func (s *StepAction) Copy() StepActionObject {
	return s.DeepCopy()
}
// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*StepAction) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind("StepAction")
}
// Checksum computes the sha256 checksum of the stepaction object.
// Prior to computing the checksum, it performs some preprocessing on the
// metadata of the object where it removes system provided annotations.
// Only the name, namespace, generateName, user-provided labels and annotations
// and the taskSpec are included for the checksum computation.
func (s *StepAction) Checksum() ([]byte, error) {
	// Normalize the metadata first so that system-managed annotations never
	// influence the checksum, then hash a minimal copy of the object.
	preprocessed := StepAction{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "tekton.dev/v1beta1",
			Kind:       "StepAction",
		},
		ObjectMeta: checksum.PrepareObjectMeta(s),
		Spec:       s.Spec,
	}
	return checksum.ComputeSha256Checksum(preprocessed)
}
// StepActionList contains a list of StepActions
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type StepActionList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []StepAction `json:"items"`
}

// Args is the list of arguments passed to a StepAction's entrypoint.
// +listType=atomic
type Args []string

// StepActionSpec contains the actionable components of a step.
type StepActionSpec struct {
	// Description is a user-facing description of the stepaction that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`
	// Image reference name to run for this StepAction.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	// +listType=atomic
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args Args `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +listType=atomic
	Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
	// Script is the contents of an executable file to execute.
	//
	// If Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.
	// +optional
	Script string `json:"script,omitempty"`
	// Step's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// Params is a list of input parameters required to run the stepAction.
	// Params must be supplied as inputs in Steps unless they declare a default value.
	// +optional
	Params v1.ParamSpecs `json:"params,omitempty"`
	// Results are values that this StepAction can output
	// +optional
	// +listType=atomic
	Results []v1.StepResult `json:"results,omitempty"`
	// SecurityContext defines the security options the Step should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// The value set in StepAction will take precedence over the value from Task.
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Volumes to mount into the Step's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	// +listType=atomic
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
}
// ToStep converts the StepActionSpec to a Step struct, carrying over the
// container-level fields (image, command/args, script, env, volume mounts,
// security context) and the declared results.
func (ss *StepActionSpec) ToStep() *v1.Step {
	step := v1.Step{
		Image:           ss.Image,
		Command:         ss.Command,
		Args:            ss.Args,
		WorkingDir:      ss.WorkingDir,
		Script:          ss.Script,
		Env:             ss.Env,
		VolumeMounts:    ss.VolumeMounts,
		SecurityContext: ss.SecurityContext,
		Results:         ss.Results,
	}
	return &step
}
// StepActionObject is implemented by StepAction
type StepActionObject interface {
	apis.Defaultable
	// StepActionMetadata returns the ObjectMeta of the implementing object.
	StepActionMetadata() metav1.ObjectMeta
	// StepActionSpec returns the StepActionSpec of the implementing object.
	StepActionSpec() StepActionSpec
	// Copy returns a deep copy of the implementing object.
	Copy() StepActionObject
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/substitution"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
// Compile-time assertions that StepAction participates in webhook validation
// and restricts which verbs validation runs for.
var (
	_ apis.Validatable              = (*StepAction)(nil)
	_ resourcesemantics.VerbLimited = (*StepAction)(nil)
)
// SupportedVerbs returns the operations that validation should be called for
func (s *StepAction) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
}
// Validate implements apis.Validatable: it validates the metadata and then
// the spec (within the spec context).
func (s *StepAction) Validate(ctx context.Context) *apis.FieldError {
	metaErrs := validate.ObjectMetadata(s.GetObjectMeta()).ViaField("metadata")
	return metaErrs.Also(s.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
}
// Validate implements apis.Validatable. It checks that the StepAction has an
// image, that script and command are not combined, and that all params,
// results and volume mounts are well formed.
func (ss *StepActionSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	if ss.Image == "" {
		errs = errs.Also(apis.ErrMissingField("Image"))
	}
	if ss.Script != "" {
		if len(ss.Command) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: "script cannot be used with command",
				Paths:   []string{"script"},
			})
		}
		// Windows scripts (shebang "#!win") are gated behind the alpha API fields.
		if strings.HasPrefix(strings.TrimSpace(ss.Script), "#!win") {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script"))
		}
		errs = errs.Also(validateNoParamSubstitutionsInScript(ss.Script))
	}
	// Remaining checks run unconditionally, in this fixed order.
	for _, fe := range []*apis.FieldError{
		validateUsageOfDeclaredParameters(ctx, *ss),
		v1.ValidateParameterTypes(ctx, ss.Params).ViaField("params"),
		validateParameterVariables(ctx, *ss, ss.Params),
		v1.ValidateStepResultsVariables(ctx, ss.Results, ss.Script),
		v1.ValidateStepResults(ctx, ss.Results).ViaField("results"),
		validateVolumeMounts(ss.VolumeMounts, ss.Params).ViaField("volumeMounts"),
	} {
		errs = errs.Also(fe)
	}
	return errs
}
// validateNoParamSubstitutionsInScript validates that param substitutions are not invoked in the script
func validateNoParamSubstitutionsInScript(script string) *apis.FieldError {
	_, found, extractErr := substitution.ExtractVariablesFromString(script, "params")
	if extractErr == "" && !found {
		return nil
	}
	// Either a params reference was found or extraction itself failed; both
	// are rejected identically.
	return &apis.FieldError{
		Message: "param substitution in scripts is not allowed.",
		Paths:   []string{"script"},
	}
}
// validateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task.
func validateUsageOfDeclaredParameters(ctx context.Context, sas StepActionSpec) *apis.FieldError {
	declared := sas.Params
	_, _, objectParams := declared.SortByType()
	declaredNames := sets.NewString(declared.GetNames()...)
	errs := validateStepActionVariables(ctx, sas, "params", declaredNames)
	errs = errs.Also(ValidateObjectUsage(ctx, sas, objectParams))
	return errs.Also(v1.ValidateObjectParamsHaveProperties(ctx, declared))
}
// validateVolumeMounts checks that each volumeMount's Name consists of exactly
// one $(params...) reference (and nothing else) and that the referenced param
// is declared. Validation stops at the first mount whose name is not a single
// param reference.
func validateVolumeMounts(volumeMounts []corev1.VolumeMount, params v1.ParamSpecs) (errs *apis.FieldError) {
	if len(volumeMounts) == 0 {
		return
	}
	paramNames := sets.String{}
	for _, p := range params {
		paramNames.Insert(p.Name)
	}
	for idx, v := range volumeMounts {
		matches, _ := substitution.ExtractVariableExpressions(v.Name, "params")
		// The two original branches (wrong match count / extra text around the
		// reference) produced identical errors; they are merged into one check.
		if len(matches) != 1 || matches[0] != v.Name {
			errs = errs.Also(apis.ErrInvalidValue(v.Name, "name", "expect the Name to be a single param reference").ViaIndex(idx))
			return errs
		}
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.Name, "params", paramNames).ViaIndex(idx))
	}
	return errs
}
// validateParameterVariables validates all variables within a slice of ParamSpecs against a StepAction
func validateParameterVariables(ctx context.Context, sas StepActionSpec, params v1.ParamSpecs) *apis.FieldError {
	errs := params.ValidateNoDuplicateNames()
	stringParams, arrayParams, objectParams := params.SortByType()
	arrayNames := sets.NewString(arrayParams.GetNames()...)
	// String and array param names share the same naming rules; object params
	// are checked separately by ValidateNameFormat.
	simpleNames := sets.NewString(stringParams.GetNames()...).Insert(arrayNames.List()...)
	errs = errs.Also(v1.ValidateNameFormat(simpleNames, objectParams))
	return errs.Also(validateStepActionArrayUsage(sas, "params", arrayNames))
}
// ValidateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object
func ValidateObjectUsage(ctx context.Context, sas StepActionSpec, params v1.ParamSpecs) (errs *apis.FieldError) {
	objectNames := sets.NewString()
	for _, p := range params {
		objectNames.Insert(p.Name)
		// Keys of an object param are referenced as $(params.<object>.<key>);
		// only declared keys may be used.
		keys := sets.NewString()
		for key := range p.Properties {
			keys.Insert(key)
		}
		errs = errs.Also(validateStepActionVariables(ctx, sas, "params\\."+p.Name, keys))
	}
	// References to an entire object param are prohibited in these fields.
	return errs.Also(validateStepActionObjectUsageAsWhole(sas, "params", objectNames))
}
// validateStepActionObjectUsageAsWhole returns an error if the StepAction contains references to the entire input object params in fields where these references are prohibited
func validateStepActionObjectUsageAsWhole(sas StepActionSpec, prefix string, vars sets.String) *apis.FieldError {
	check := func(value string) *apis.FieldError {
		return substitution.ValidateNoReferencesToEntireProhibitedVariables(value, prefix, vars)
	}
	errs := check(sas.Image).ViaField("image")
	errs = errs.Also(check(sas.Script).ViaField("script"))
	for i := range sas.Command {
		errs = errs.Also(check(sas.Command[i]).ViaFieldIndex("command", i))
	}
	for i := range sas.Args {
		errs = errs.Also(check(sas.Args[i]).ViaFieldIndex("args", i))
	}
	for _, env := range sas.Env {
		errs = errs.Also(check(env.Value).ViaFieldKey("env", env.Name))
	}
	for i := range sas.VolumeMounts {
		errs = errs.Also(check(sas.VolumeMounts[i].Name).ViaFieldIndex("volumeMounts", i))
	}
	return errs
}
// validateStepActionArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited
func validateStepActionArrayUsage(sas StepActionSpec, prefix string, arrayParamNames sets.String) *apis.FieldError {
	prohibited := func(value string) *apis.FieldError {
		return substitution.ValidateNoReferencesToProhibitedVariables(value, prefix, arrayParamNames)
	}
	errs := prohibited(sas.Image).ViaField("image")
	errs = errs.Also(prohibited(sas.Script).ViaField("script"))
	// Array params may appear in command/args only as isolated references
	// (one reference forming the whole element), never embedded in a string.
	for idx, c := range sas.Command {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(c, prefix, arrayParamNames).ViaFieldIndex("command", idx))
	}
	for idx, a := range sas.Args {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(a, prefix, arrayParamNames).ViaFieldIndex("args", idx))
	}
	for _, e := range sas.Env {
		errs = errs.Also(prohibited(e.Value).ViaFieldKey("env", e.Name))
	}
	for idx, m := range sas.VolumeMounts {
		errs = errs.Also(prohibited(m.Name).ViaFieldIndex("volumeMounts", idx))
	}
	return errs
}
// validateStepActionVariables returns an error if the StepAction contains references to any unknown variables
func validateStepActionVariables(ctx context.Context, sas StepActionSpec, prefix string, vars sets.String) *apis.FieldError {
	check := func(value string) *apis.FieldError {
		return substitution.ValidateNoReferencesToUnknownVariables(value, prefix, vars)
	}
	errs := check(sas.Image).ViaField("image")
	errs = errs.Also(check(sas.Script).ViaField("script"))
	for i, cmd := range sas.Command {
		errs = errs.Also(check(cmd).ViaFieldIndex("command", i))
	}
	for i, arg := range sas.Args {
		errs = errs.Also(check(arg).ViaFieldIndex("args", i))
	}
	for _, env := range sas.Env {
		errs = errs.Also(check(env.Value).ViaFieldKey("env", env.Name))
	}
	for i, vm := range sas.VolumeMounts {
		errs = errs.Also(check(vm.Name).ViaFieldIndex("volumeMounts", i))
	}
	return errs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"knative.dev/pkg/apis"
)
// TaskDeprecationsAnnotationKey is the annotation key for all deprecated fields of (a) Task(s) that belong(s) to an object.
// For example: a v1beta1.Pipeline contains two tasks
//
// spec:
//
//	tasks:
//	  - name: task-1
//	    stepTemplate:
//	      name: deprecated-name-field # deprecated field
//	  - name: task-2
//	    steps:
//	      - tty: true # deprecated field
//
// The annotation would be:
//
//	"tekton.dev/v1beta1.task-deprecations": `{
//	   "task-1":{
//	      "deprecatedStepTemplates":{
//	         "name":"deprecated-name-field"
//	      }
//	   },
//	   "task-2":{
//	      "deprecatedSteps":[{"tty":true}]
//	   }
//	}`
const (
	TaskDeprecationsAnnotationKey = "tekton.dev/v1beta1.task-deprecations"
	// resourcesAnnotationKey stores the serialized (deprecated) TaskResources
	// during round-trip conversion to/from v1, which has no resources field.
	resourcesAnnotationKey = "tekton.dev/v1beta1Resources"
)

// Compile-time assertion that Task supports apis.Convertible conversion.
var _ apis.Convertible = (*Task)(nil)
// ConvertTo implements apis.Convertible: it converts this v1beta1.Task into
// the given sink (only *v1.Task is supported). Conversion is skipped during
// deletion.
func (t *Task) ConvertTo(ctx context.Context, to apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	sink, ok := to.(*v1.Task)
	if !ok {
		return fmt.Errorf("unknown version, got: %T", to)
	}
	sink.ObjectMeta = t.ObjectMeta
	// Deprecated resources have no v1 equivalent; stash them in an annotation.
	if err := serializeResources(&sink.ObjectMeta, &t.Spec); err != nil {
		return err
	}
	return t.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta, t.Name)
}
// ConvertTo implements apis.Convertible: it converts this v1beta1.TaskSpec
// into the given v1.TaskSpec, preserving deprecated step/stepTemplate fields
// in an annotation on meta keyed by taskName.
func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1.TaskSpec, meta *metav1.ObjectMeta, taskName string) error {
	// Deprecated fields are dropped by the v1 types; record them first so
	// ConvertFrom can restore them.
	if err := serializeTaskDeprecations(meta, ts, taskName); err != nil {
		return err
	}
	sink.Steps = nil
	for _, s := range ts.Steps {
		var step v1.Step
		s.convertTo(ctx, &step)
		sink.Steps = append(sink.Steps, step)
	}
	sink.Volumes = v1.Volumes(ts.Volumes)
	if ts.StepTemplate != nil {
		var tmpl v1.StepTemplate
		ts.StepTemplate.convertTo(ctx, &tmpl)
		sink.StepTemplate = &tmpl
	}
	sink.Sidecars = nil
	for _, s := range ts.Sidecars {
		var sc v1.Sidecar
		s.convertTo(ctx, &sc)
		sink.Sidecars = append(sink.Sidecars, sc)
	}
	sink.Workspaces = nil
	for _, w := range ts.Workspaces {
		var ws v1.WorkspaceDeclaration
		w.convertTo(ctx, &ws)
		sink.Workspaces = append(sink.Workspaces, ws)
	}
	sink.Results = nil
	for _, r := range ts.Results {
		var res v1.TaskResult
		r.convertTo(ctx, &res)
		sink.Results = append(sink.Results, res)
	}
	sink.Params = nil
	for _, p := range ts.Params {
		var ps v1.ParamSpec
		p.convertTo(ctx, &ps)
		sink.Params = append(sink.Params, ps)
	}
	sink.DisplayName = ts.DisplayName
	sink.Description = ts.Description
	return nil
}
// ConvertFrom implements apis.Convertible: it populates this v1beta1.Task
// from the given source (only *v1.Task is supported). Conversion is skipped
// during deletion.
func (t *Task) ConvertFrom(ctx context.Context, from apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	switch source := from.(type) {
	case *v1.Task:
		t.ObjectMeta = source.ObjectMeta
		// Restore deprecated resources that ConvertTo stashed in an annotation.
		if err := deserializeResources(&t.ObjectMeta, &t.Spec); err != nil {
			return err
		}
		return t.Spec.ConvertFrom(ctx, &source.Spec, &t.ObjectMeta, t.Name)
	default:
		// Bug fix: report the type of the unsupported source object. The
		// previous code formatted %T with the receiver t, so the error always
		// said "*v1beta1.Task" regardless of what was actually passed in
		// (compare ConvertTo, which prints the sink's type).
		return fmt.Errorf("unknown version, got: %T", source)
	}
}
// ConvertFrom implements apis.Convertible: it populates this v1beta1.TaskSpec
// from the given v1.TaskSpec, restoring any deprecated step/stepTemplate
// fields previously serialized onto meta under taskName.
func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec, meta *metav1.ObjectMeta, taskName string) error {
	ts.Steps = nil
	for _, s := range source.Steps {
		var step Step
		step.convertFrom(ctx, s)
		ts.Steps = append(ts.Steps, step)
	}
	ts.Volumes = Volumes(source.Volumes)
	if source.StepTemplate != nil {
		var tmpl StepTemplate
		tmpl.convertFrom(ctx, source.StepTemplate)
		ts.StepTemplate = &tmpl
	}
	// Must run after Steps and StepTemplate are in place: it writes the
	// deprecated fields back onto them.
	if err := deserializeTaskDeprecations(meta, ts, taskName); err != nil {
		return err
	}
	ts.Sidecars = nil
	for _, s := range source.Sidecars {
		var sc Sidecar
		sc.convertFrom(ctx, s)
		ts.Sidecars = append(ts.Sidecars, sc)
	}
	ts.Workspaces = nil
	for _, w := range source.Workspaces {
		var ws WorkspaceDeclaration
		ws.convertFrom(ctx, w)
		ts.Workspaces = append(ts.Workspaces, ws)
	}
	ts.Results = nil
	for _, r := range source.Results {
		var res TaskResult
		res.convertFrom(ctx, r)
		ts.Results = append(ts.Results, res)
	}
	ts.Params = nil
	for _, p := range source.Params {
		var ps ParamSpec
		ps.convertFrom(ctx, p)
		ts.Params = append(ts.Params, ps)
	}
	ts.DisplayName = source.DisplayName
	ts.Description = source.Description
	return nil
}
// taskDeprecation contains deprecated fields of a Task
// +k8s:openapi-gen=false
type taskDeprecation struct {
	// DeprecatedSteps contains the Steps of a Task that have deprecated fields defined.
	// +listType=atomic
	DeprecatedSteps []Step `json:"deprecatedSteps,omitempty"`
	// DeprecatedStepTemplate contains the stepTemplate of a Task that has deprecated fields defined.
	DeprecatedStepTemplate *StepTemplate `json:"deprecatedStepTemplate,omitempty"`
}
// taskDeprecations contains deprecated fields of Tasks that belong to the same Pipeline or PipelineRun;
// the key is the Task name.
// +k8s:openapi-gen=false
type taskDeprecations map[string]taskDeprecation
// serializeTaskDeprecations appends the current Task's deprecation info to
// the annotation of the object. The object could be Task, TaskRun, Pipeline
// or PipelineRun.
func serializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskName string) error {
	// Load whatever deprecation entries other Tasks have already written, so
	// this Task's entry is merged rather than overwriting them.
	existing := taskDeprecations{}
	if raw, ok := meta.Annotations[TaskDeprecationsAnnotationKey]; ok {
		if err := json.Unmarshal([]byte(raw), &existing); err != nil {
			return fmt.Errorf("error serializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err)
		}
	}
	if !spec.HasDeprecatedFields() {
		return nil
	}
	td := retrieveTaskDeprecation(spec)
	if td == nil {
		return nil
	}
	existing[taskName] = *td
	return version.SerializeToMetadata(meta, existing, TaskDeprecationsAnnotationKey)
}
// deserializeTaskDeprecations retrieves deprecation info of the Task from object annotation.
// The object could be Task, TaskRun, Pipeline or PipelineRun.
// It is the inverse of serializeTaskDeprecations: the Task's deprecated
// step/stepTemplate fields are written back onto spec, and the consumed entry
// is removed from the annotation.
func deserializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskName string) error {
	existingDeprecations := taskDeprecations{}
	// Nothing to restore when the object carries no annotations at all.
	if meta == nil || meta.Annotations == nil {
		return nil
	}
	if str, ok := meta.Annotations[TaskDeprecationsAnnotationKey]; ok {
		if err := json.Unmarshal([]byte(str), &existingDeprecations); err != nil {
			return fmt.Errorf("error deserializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err)
		}
	}
	if td, ok := existingDeprecations[taskName]; ok {
		// The serialized steps were captured index-for-index from the original
		// spec, so a length mismatch means the annotation does not match this spec.
		if len(spec.Steps) != len(td.DeprecatedSteps) {
			return errors.New("length of deserialized steps mismatch the length of target steps")
		}
		// Copy each step's deprecated fields back onto the step at the same index.
		for i := range len(spec.Steps) {
			spec.Steps[i].DeprecatedPorts = td.DeprecatedSteps[i].DeprecatedPorts
			spec.Steps[i].DeprecatedLivenessProbe = td.DeprecatedSteps[i].DeprecatedLivenessProbe
			spec.Steps[i].DeprecatedReadinessProbe = td.DeprecatedSteps[i].DeprecatedReadinessProbe
			spec.Steps[i].DeprecatedStartupProbe = td.DeprecatedSteps[i].DeprecatedStartupProbe
			spec.Steps[i].DeprecatedLifecycle = td.DeprecatedSteps[i].DeprecatedLifecycle
			spec.Steps[i].DeprecatedTerminationMessagePath = td.DeprecatedSteps[i].DeprecatedTerminationMessagePath
			spec.Steps[i].DeprecatedTerminationMessagePolicy = td.DeprecatedSteps[i].DeprecatedTerminationMessagePolicy
			spec.Steps[i].DeprecatedStdin = td.DeprecatedSteps[i].DeprecatedStdin
			spec.Steps[i].DeprecatedStdinOnce = td.DeprecatedSteps[i].DeprecatedStdinOnce
			spec.Steps[i].DeprecatedTTY = td.DeprecatedSteps[i].DeprecatedTTY
		}
		if td.DeprecatedStepTemplate != nil {
			// The converted spec may not carry a stepTemplate (e.g. if only
			// deprecated fields were set on it); recreate it before restoring.
			if spec.StepTemplate == nil {
				spec.StepTemplate = &StepTemplate{}
			}
			spec.StepTemplate.DeprecatedName = td.DeprecatedStepTemplate.DeprecatedName
			spec.StepTemplate.DeprecatedPorts = td.DeprecatedStepTemplate.DeprecatedPorts
			spec.StepTemplate.DeprecatedLivenessProbe = td.DeprecatedStepTemplate.DeprecatedLivenessProbe
			spec.StepTemplate.DeprecatedReadinessProbe = td.DeprecatedStepTemplate.DeprecatedReadinessProbe
			spec.StepTemplate.DeprecatedStartupProbe = td.DeprecatedStepTemplate.DeprecatedStartupProbe
			spec.StepTemplate.DeprecatedLifecycle = td.DeprecatedStepTemplate.DeprecatedLifecycle
			spec.StepTemplate.DeprecatedTerminationMessagePath = td.DeprecatedStepTemplate.DeprecatedTerminationMessagePath
			spec.StepTemplate.DeprecatedTerminationMessagePolicy = td.DeprecatedStepTemplate.DeprecatedTerminationMessagePolicy
			spec.StepTemplate.DeprecatedStdin = td.DeprecatedStepTemplate.DeprecatedStdin
			spec.StepTemplate.DeprecatedStdinOnce = td.DeprecatedStepTemplate.DeprecatedStdinOnce
			spec.StepTemplate.DeprecatedTTY = td.DeprecatedStepTemplate.DeprecatedTTY
		}
		// This Task's entry has been consumed: drop it, then rewrite or remove
		// the annotation depending on whether entries for other Tasks remain.
		delete(existingDeprecations, taskName)
		if len(existingDeprecations) == 0 {
			delete(meta.Annotations, TaskDeprecationsAnnotationKey)
		} else {
			updatedDeprecations, err := json.Marshal(existingDeprecations)
			if err != nil {
				return err
			}
			meta.Annotations[TaskDeprecationsAnnotationKey] = string(updatedDeprecations)
		}
		// Normalize an emptied annotations map to nil — presumably so
		// round-tripped objects compare equal to ones that never had
		// annotations; verify against the conversion tests.
		if len(meta.Annotations) == 0 {
			meta.Annotations = nil
		}
	}
	return nil
}
// retrieveTaskDeprecation extracts only the deprecated fields of each step
// and of the stepTemplate into a taskDeprecation, or returns nil when the
// spec has no deprecated fields set.
func retrieveTaskDeprecation(spec *TaskSpec) *taskDeprecation {
	if !spec.HasDeprecatedFields() {
		return nil
	}
	// One entry per step, index-aligned with spec.Steps, carrying only the
	// deprecated fields.
	steps := make([]Step, 0, len(spec.Steps))
	for _, s := range spec.Steps {
		steps = append(steps, Step{
			DeprecatedPorts:                    s.DeprecatedPorts,
			DeprecatedLivenessProbe:            s.DeprecatedLivenessProbe,
			DeprecatedReadinessProbe:           s.DeprecatedReadinessProbe,
			DeprecatedStartupProbe:             s.DeprecatedStartupProbe,
			DeprecatedLifecycle:                s.DeprecatedLifecycle,
			DeprecatedTerminationMessagePath:   s.DeprecatedTerminationMessagePath,
			DeprecatedTerminationMessagePolicy: s.DeprecatedTerminationMessagePolicy,
			DeprecatedStdin:                    s.DeprecatedStdin,
			DeprecatedStdinOnce:                s.DeprecatedStdinOnce,
			DeprecatedTTY:                      s.DeprecatedTTY,
		})
	}
	var tmpl *StepTemplate
	if st := spec.StepTemplate; st != nil {
		tmpl = &StepTemplate{
			DeprecatedName:                     st.DeprecatedName,
			DeprecatedPorts:                    st.DeprecatedPorts,
			DeprecatedLivenessProbe:            st.DeprecatedLivenessProbe,
			DeprecatedReadinessProbe:           st.DeprecatedReadinessProbe,
			DeprecatedStartupProbe:             st.DeprecatedStartupProbe,
			DeprecatedLifecycle:                st.DeprecatedLifecycle,
			DeprecatedTerminationMessagePath:   st.DeprecatedTerminationMessagePath,
			DeprecatedTerminationMessagePolicy: st.DeprecatedTerminationMessagePolicy,
			DeprecatedStdin:                    st.DeprecatedStdin,
			DeprecatedStdinOnce:                st.DeprecatedStdinOnce,
			DeprecatedTTY:                      st.DeprecatedTTY,
		}
	}
	return &taskDeprecation{
		DeprecatedSteps:        steps,
		DeprecatedStepTemplate: tmpl,
	}
}
// serializeResources stashes the (deprecated) TaskResources in an annotation
// on meta, since the v1 TaskSpec has no resources field. A nil Resources is a
// no-op.
func serializeResources(meta *metav1.ObjectMeta, spec *TaskSpec) error {
	if r := spec.Resources; r != nil {
		return version.SerializeToMetadata(meta, r, resourcesAnnotationKey)
	}
	return nil
}
// deserializeResources restores the (deprecated) TaskResources from the
// annotation written by serializeResources. spec.Resources is only set when
// the deserialized value actually has inputs or outputs.
func deserializeResources(meta *metav1.ObjectMeta, spec *TaskSpec) error {
	var res TaskResources
	if err := version.DeserializeFromMetadata(meta, &res, resourcesAnnotationKey); err != nil {
		return err
	}
	if res.Inputs == nil && res.Outputs == nil {
		return nil
	}
	spec.Resources = &res
	return nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"knative.dev/pkg/apis"
)
// Compile-time assertion that Task participates in webhook defaulting.
var _ apis.Defaultable = (*Task)(nil)

// SetDefaults implements apis.Defaultable
func (t *Task) SetDefaults(ctx context.Context) {
	// Defaulting is delegated entirely to the spec.
	t.Spec.SetDefaults(ctx)
}
// SetDefaults set any defaults for the task spec
func (ts *TaskSpec) SetDefaults(ctx context.Context) {
	// Iterate by index so the defaults are applied to the slice elements
	// themselves rather than to copies.
	for idx := range ts.Params {
		ts.Params[idx].SetDefaults(ctx)
	}
	for idx := range ts.Results {
		ts.Results[idx].SetDefaults(ctx)
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/kmeta"
)
// +genclient
// +genclient:noStatus
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true

// Task represents a collection of sequential steps that are run as part of a
// Pipeline using a set of inputs and producing a set of outputs. Tasks execute
// when TaskRuns are created that provide the input parameters and resources and
// output resources the Task requires.
//
// Deprecated: Please use v1.Task instead.
type Task struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata"`
	// Spec holds the desired state of the Task from the client
	// +optional
	Spec TaskSpec `json:"spec"`
}
// Compile-time assertion that Task can be used as an owner reference.
var _ kmeta.OwnerRefable = (*Task)(nil)

// TaskSpec returns the task's spec
func (t *Task) TaskSpec() TaskSpec {
	return t.Spec
}

// TaskMetadata returns the task's ObjectMeta
func (t *Task) TaskMetadata() metav1.ObjectMeta {
	return t.ObjectMeta
}

// Copy returns a deep copy of the task
func (t *Task) Copy() TaskObject {
	return t.DeepCopy()
}

// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*Task) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind(pipeline.TaskControllerName)
}
// Checksum computes the sha256 checksum of the task object.
// Prior to computing the checksum, it performs some preprocessing on the
// metadata of the object where it removes system provided annotations.
// Only the name, namespace, generateName, user-provided labels and annotations
// and the taskSpec are included for the checksum computation.
func (t *Task) Checksum() ([]byte, error) {
	// Pin TypeMeta explicitly so the hash is stable regardless of how the
	// object was decoded.
	prepared := Task{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "tekton.dev/v1beta1",
			Kind:       "Task",
		},
		ObjectMeta: checksum.PrepareObjectMeta(t),
		Spec:       t.Spec,
	}
	return checksum.ComputeSha256Checksum(prepared)
}
// Volumes is a list of pod volumes made available to a Task's containers.
// +listType=atomic
type Volumes []corev1.Volume
// TaskSpec defines the desired state of Task.
type TaskSpec struct {
	// Resources is a list input and output resource to run the task
	// Resources are represented in TaskRuns as bindings to instances of
	// PipelineResources.
	//
	// Deprecated: Unused, preserved only for backwards compatibility
	// +optional
	Resources *TaskResources `json:"resources,omitempty"`
	// Params is a list of input parameters required to run the task. Params
	// must be supplied as inputs in TaskRuns unless they declare a default
	// value.
	// +optional
	Params ParamSpecs `json:"params,omitempty"`
	// DisplayName is a user-facing name of the task that may be
	// used to populate a UI.
	// +optional
	DisplayName string `json:"displayName,omitempty"`
	// Description is a user-facing description of the task that may be
	// used to populate a UI.
	// +optional
	Description string `json:"description,omitempty"`
	// Steps are the steps of the build; each step is run sequentially with the
	// source mounted into /workspace.
	// +listType=atomic
	Steps []Step `json:"steps,omitempty"`
	// Volumes is a collection of volumes that are available to mount into the
	// steps of the build.
	// See Pod.spec.volumes (API version: v1)
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Volumes Volumes `json:"volumes,omitempty"`
	// StepTemplate can be used as the basis for all step containers within the
	// Task, so that the steps inherit settings on the base container.
	StepTemplate *StepTemplate `json:"stepTemplate,omitempty"`
	// Sidecars are run alongside the Task's step containers. They begin before
	// the steps start and end after the steps complete.
	// +listType=atomic
	Sidecars []Sidecar `json:"sidecars,omitempty"`
	// Workspaces are the volumes that this Task requires.
	// +listType=atomic
	Workspaces []WorkspaceDeclaration `json:"workspaces,omitempty"`
	// Results are values that this Task can output
	// +listType=atomic
	Results []TaskResult `json:"results,omitempty"`
}
// TaskList contains a list of Task
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TaskList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the Task objects in this list.
	Items []Task `json:"items"`
}
// HasDeprecatedFields returns true if the TaskSpec has deprecated field specified.
// It is used to decide whether the spec's deprecated fields need to be
// preserved in an annotation during conversion to v1.
func (ts *TaskSpec) HasDeprecatedFields() bool {
	if ts == nil {
		return false
	}
	for _, s := range ts.Steps {
		if len(s.DeprecatedPorts) > 0 ||
			s.DeprecatedLivenessProbe != nil ||
			s.DeprecatedReadinessProbe != nil ||
			s.DeprecatedStartupProbe != nil ||
			s.DeprecatedLifecycle != nil ||
			s.DeprecatedTerminationMessagePath != "" ||
			s.DeprecatedTerminationMessagePolicy != "" ||
			s.DeprecatedStdin ||
			s.DeprecatedStdinOnce ||
			s.DeprecatedTTY {
			return true
		}
	}
	if st := ts.StepTemplate; st != nil {
		// Bug fix: the stepTemplate branch previously omitted the
		// DeprecatedLivenessProbe check present in the per-step branch, so a
		// stepTemplate whose only deprecated field was a liveness probe was
		// not detected (and its value would be dropped on conversion).
		if len(st.DeprecatedPorts) > 0 ||
			st.DeprecatedName != "" ||
			st.DeprecatedLivenessProbe != nil ||
			st.DeprecatedReadinessProbe != nil ||
			st.DeprecatedStartupProbe != nil ||
			st.DeprecatedLifecycle != nil ||
			st.DeprecatedTerminationMessagePath != "" ||
			st.DeprecatedTerminationMessagePolicy != "" ||
			st.DeprecatedStdin ||
			st.DeprecatedStdinOnce ||
			st.DeprecatedTTY {
			return true
		}
	}
	return false
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"path/filepath"
"regexp"
"slices"
"strings"
"time"
"github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
"github.com/tektoncd/pipeline/pkg/substitution"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
const (
	// stringAndArrayVariableNameFormat is the regex to validate if string/array variable name format follows the following rules.
	// - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)
	// - Must begin with a letter or an underscore (_)
	stringAndArrayVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$"

	// objectVariableNameFormat is the regex used to validate object name and key names format
	// The difference with the array or string name format is that object variable names shouldn't contain dots.
	objectVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9-]*$"
)
// Compile-time assertions that Task participates in webhook validation and
// restricts which verbs validation runs for.
var (
	_ apis.Validatable              = (*Task)(nil)
	_ resourcesemantics.VerbLimited = (*Task)(nil)
)
// SupportedVerbs returns the operations that validation should be called for
func (t *Task) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
}
// Pre-compiled forms of the name-format patterns above; compiled once at
// package init so validation never recompiles them per call.
var (
	stringAndArrayVariableNameFormatRegex = regexp.MustCompile(stringAndArrayVariableNameFormat)
	objectVariableNameFormatRegex         = regexp.MustCompile(objectVariableNameFormat)
)
// Validate implements apis.Validatable
func (t *Task) Validate(ctx context.Context) *apis.FieldError {
	errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata")
	errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
	// A standalone Task (as opposed to one declared inline in a TaskRun or
	// PipelineRun) cannot use propagated parameters, so every param it
	// references must be declared.
	errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params).ViaField("spec"))
	return errs
}
// Validate implements apis.Validatable. It checks steps, volumes, workspaces,
// params, results and context/result variable usage, and rejects the
// deprecated resources field.
func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	if len(ts.Steps) == 0 {
		errs = errs.Also(apis.ErrMissingField("steps"))
	}
	errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes"))
	errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces"))
	errs = errs.Also(validateWorkspaceUsages(ctx, ts))
	// Steps are validated after merging the stepTemplate into them, so
	// template-provided fields are checked too.
	merged, mergeErr := MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps)
	if mergeErr != nil {
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("error merging step template and steps: %s", mergeErr),
			Paths:   []string{"stepTemplate"},
			Details: mergeErr.Error(),
		})
	}
	errs = errs.Also(validateSteps(ctx, merged).ViaField("steps"))
	errs = errs.Also(validateSidecarNames(ts.Sidecars))
	errs = errs.Also(ValidateParameterTypes(ctx, ts.Params).ViaField("params"))
	errs = errs.Also(ValidateParameterVariables(ctx, ts.Steps, ts.Params))
	errs = errs.Also(validateTaskContextVariables(ctx, ts.Steps))
	errs = errs.Also(validateTaskResultsVariables(ctx, ts.Steps, ts.Results))
	errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results"))
	// The resources field is deprecated and unused; reject it outright.
	if ts.Resources != nil {
		errs = errs.Also(apis.ErrDisallowedFields("resources"))
	}
	return errs
}
// ValidateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task.
func ValidateUsageOfDeclaredParameters(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError {
	_, _, objectParams := params.sortByType()
	declaredNames := sets.NewString(params.getNames()...)
	errs := validateVariables(ctx, steps, "params", declaredNames)
	errs = errs.Also(validateObjectUsage(ctx, steps, objectParams))
	return errs.Also(validateObjectParamsHaveProperties(ctx, params))
}
// validateObjectParamsHaveProperties returns an error if any declared object params are missing properties
func validateObjectParamsHaveProperties(ctx context.Context, params ParamSpecs) (errs *apis.FieldError) {
	for _, param := range params {
		// Only object-typed params are required to declare their properties.
		if param.Type != ParamTypeObject || param.Properties != nil {
			continue
		}
		errs = errs.Also(apis.ErrMissingField(param.Name + ".properties"))
	}
	return errs
}
// validateSidecarNames rejects sidecars that use the reserved results sidecar name.
func validateSidecarNames(sidecars []Sidecar) (errs *apis.FieldError) {
	for _, sidecar := range sidecars {
		if sidecar.Name != pipeline.ReservedResultsSidecarName {
			continue
		}
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("Invalid: cannot use reserved sidecar name %v ", sidecar.Name),
			Paths:   []string{"sidecars"},
		})
	}
	return errs
}
// validateResults validates each declared task result in turn.
func validateResults(ctx context.Context, results []TaskResult) (errs *apis.FieldError) {
	for i := range results {
		errs = errs.Also(results[i].Validate(ctx).ViaIndex(i))
	}
	return errs
}
// validateDeclaredWorkspaces will make sure that the declared workspaces do not try to use
// a mount path which conflicts with any other declared workspaces, with the explicitly
// declared volume mounts, or with the stepTemplate. The names must also be unique.
func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *StepTemplate) (errs *apis.FieldError) {
	// Collect every mount path already claimed by step and stepTemplate volume mounts.
	mountPaths := sets.NewString()
	for _, step := range steps {
		for _, vm := range step.VolumeMounts {
			mountPaths.Insert(filepath.Clean(vm.MountPath))
		}
	}
	if stepTemplate != nil {
		for _, vm := range stepTemplate.VolumeMounts {
			mountPaths.Insert(filepath.Clean(vm.MountPath))
		}
	}
	wsNames := sets.NewString()
	for idx, w := range workspaces {
		// Workspace names must be unique
		if wsNames.Has(w.Name) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace name %q must be unique", w.Name), "name").ViaIndex(idx))
		} else {
			wsNames.Insert(w.Name)
		}
		// Workspaces must not try to use mount paths that are already used.
		// Use the sets API (Has/Insert) consistently rather than raw map
		// indexing, matching how mountPaths is used above.
		mountPath := filepath.Clean(w.GetMountPath())
		if mountPaths.Has(mountPath) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace mount path %q must be unique", mountPath), "mountpath").ViaIndex(idx))
		}
		mountPaths.Insert(mountPath)
	}
	return errs
}
// validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps
// refer to workspaces that are defined in the Task.
//
// This is a beta feature and will fail validation if it's used by a step
// or sidecar when the enable-api-fields feature gate is anything but "beta".
//
// Note that this feature reached beta after the v1 API version has been released and
// consequently it is *not* implicitly enabled on the v1beta1 API to avoid suffering
// from the issues described in TEP-0138 https://github.com/tektoncd/community/pull/1034
func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) {
	workspaces := ts.Workspaces
	steps := ts.Steps
	sidecars := ts.Sidecars
	// Index of all declared workspace names for the lookups below.
	wsNames := sets.NewString()
	for _, w := range workspaces {
		wsNames.Insert(w.Name)
	}
	for stepIdx, step := range steps {
		// Any workspace usage on a step at all is gated behind beta API fields.
		if len(step.Workspaces) != 0 {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "step workspaces", config.BetaAPIFields).ViaIndex(stepIdx).ViaField("steps"))
		}
		for workspaceIdx, w := range step.Workspaces {
			if !wsNames.Has(w.Name) {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(stepIdx).ViaField("steps"))
			}
		}
	}
	for sidecarIdx, sidecar := range sidecars {
		// Same beta gating applies to sidecar workspace usage.
		if len(sidecar.Workspaces) != 0 {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.BetaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars"))
		}
		for workspaceIdx, w := range sidecar.Workspaces {
			if !wsNames.Has(w.Name) {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(sidecarIdx).ViaField("sidecars"))
			}
		}
	}
	return errs
}
// ValidateVolumes validates a slice of volumes to make sure there are no duplicate names
func ValidateVolumes(volumes []corev1.Volume) (errs *apis.FieldError) {
	// Task must not have duplicate volume names.
	seen := sets.NewString()
	for i, vol := range volumes {
		if seen.Has(vol.Name) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("multiple volumes with same name %q", vol.Name), "name").ViaIndex(i))
			continue
		}
		seen.Insert(vol.Name)
	}
	return errs
}
// validateSteps validates each step in the list, enforcing unique step names
// and per-step constraints (step results and when expressions included).
func validateSteps(ctx context.Context, steps []Step) (errs *apis.FieldError) {
	// Task must not have duplicate step names.
	names := sets.NewString()
	for idx, s := range steps {
		errs = errs.Also(validateStep(ctx, s, names).ViaIndex(idx))
		if s.Results != nil {
			// Step results are validated using the v1 helpers.
			errs = errs.Also(v1.ValidateStepResultsVariables(ctx, s.Results, s.Script).ViaIndex(idx))
			errs = errs.Also(v1.ValidateStepResults(ctx, s.Results).ViaIndex(idx).ViaField("results"))
		}
		if len(s.When) > 0 {
			errs = errs.Also(s.When.validate(ctx).ViaIndex(idx))
		}
	}
	return errs
}
// errorIfStepResultReferenceinField reports an error when value contains a
// step-result reference; such references are prohibited in fieldName.
func errorIfStepResultReferenceinField(value, fieldName string) (errs *apis.FieldError) {
	if resultref.StepResultRegex.MatchString(value) {
		errs = errs.Also(&apis.FieldError{
			Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
			Paths:   []string{fieldName},
		})
	}
	return errs
}
// stepArtifactReferenceExists reports whether src contains a step artifact reference.
func stepArtifactReferenceExists(src string) bool {
	if artifactref.StepArtifactRegex.MatchString(src) {
		return true
	}
	return strings.Contains(src, "$("+artifactref.StepArtifactPathPattern+")")
}
// taskArtifactReferenceExists reports whether src contains a task artifact reference.
func taskArtifactReferenceExists(src string) bool {
	if artifactref.TaskArtifactRegex.MatchString(src) {
		return true
	}
	return strings.Contains(src, "$("+artifactref.TaskArtifactPathPattern+")")
}
// errorIfStepArtifactReferencedInField reports an error when value contains a
// step-artifact reference; such references are prohibited in fieldName.
func errorIfStepArtifactReferencedInField(value, fieldName string) (errs *apis.FieldError) {
	if !stepArtifactReferenceExists(value) {
		return nil
	}
	return errs.Also(&apis.FieldError{
		Message: "stepArtifact substitutions are only allowed in env, command, args and script. Found usage in",
		Paths:   []string{fieldName},
	})
}
// validateStepArtifactsReference rejects step-artifact references in every
// field where they are not permitted (anything other than env/command/args/script).
func validateStepArtifactsReference(s Step) (errs *apis.FieldError) {
	// Scalar fields that may never carry an artifact reference.
	for _, f := range []struct{ value, field string }{
		{s.Name, "name"},
		{s.Image, "image"},
		{string(s.ImagePullPolicy), "imagePullPolicy"},
		{s.WorkingDir, "workingDir"},
	} {
		errs = errs.Also(errorIfStepArtifactReferencedInField(f.value, f.field))
	}
	for _, e := range s.EnvFrom {
		errs = errs.Also(errorIfStepArtifactReferencedInField(e.Prefix, "envFrom.prefix"))
		if e.ConfigMapRef != nil {
			errs = errs.Also(errorIfStepArtifactReferencedInField(e.ConfigMapRef.LocalObjectReference.Name, "envFrom.configMapRef"))
		}
		if e.SecretRef != nil {
			errs = errs.Also(errorIfStepArtifactReferencedInField(e.SecretRef.LocalObjectReference.Name, "envFrom.secretRef"))
		}
	}
	for _, vm := range s.VolumeMounts {
		errs = errs.Also(errorIfStepArtifactReferencedInField(vm.Name, "volumeMounts.name"))
		errs = errs.Also(errorIfStepArtifactReferencedInField(vm.MountPath, "volumeMounts.mountPath"))
		errs = errs.Also(errorIfStepArtifactReferencedInField(vm.SubPath, "volumeMounts.subPath"))
	}
	for _, vd := range s.VolumeDevices {
		errs = errs.Also(errorIfStepArtifactReferencedInField(vd.Name, "volumeDevices.name"))
		errs = errs.Also(errorIfStepArtifactReferencedInField(vd.DevicePath, "volumeDevices.devicePath"))
	}
	return errs
}
// validateStepResultReference rejects step-result references in every field
// where they are not permitted (anything other than env/command/args).
func validateStepResultReference(s Step) (errs *apis.FieldError) {
	// Scalar fields that may never carry a step-result reference.
	for _, f := range []struct{ value, field string }{
		{s.Name, "name"},
		{s.Image, "image"},
		{s.Script, "script"},
		{string(s.ImagePullPolicy), "imagePullPolicy"},
		{s.WorkingDir, "workingDir"},
	} {
		errs = errs.Also(errorIfStepResultReferenceinField(f.value, f.field))
	}
	for _, e := range s.EnvFrom {
		errs = errs.Also(errorIfStepResultReferenceinField(e.Prefix, "envFrom.prefix"))
		if e.ConfigMapRef != nil {
			errs = errs.Also(errorIfStepResultReferenceinField(e.ConfigMapRef.LocalObjectReference.Name, "envFrom.configMapRef"))
		}
		if e.SecretRef != nil {
			errs = errs.Also(errorIfStepResultReferenceinField(e.SecretRef.LocalObjectReference.Name, "envFrom.secretRef"))
		}
	}
	for _, vm := range s.VolumeMounts {
		errs = errs.Also(errorIfStepResultReferenceinField(vm.Name, "volumeMounts.name"))
		errs = errs.Also(errorIfStepResultReferenceinField(vm.MountPath, "volumeMounts.mountPath"))
		errs = errs.Also(errorIfStepResultReferenceinField(vm.SubPath, "volumeMounts.subPath"))
	}
	for _, vd := range s.VolumeDevices {
		errs = errs.Also(errorIfStepResultReferenceinField(vd.Name, "volumeDevices.name"))
		errs = errs.Also(errorIfStepResultReferenceinField(vd.DevicePath, "volumeDevices.devicePath"))
	}
	return errs
}
// validateStep validates a single step: mutual exclusion between Ref and the
// inline fields, step-name uniqueness and DNS-label format, non-negative
// timeout, reserved volume mounts, onError values, alpha/beta feature gating,
// and prohibited step result/artifact references. The names set accumulates
// step names seen so far across the Task and is mutated here.
func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.FieldError) {
	if err := validateArtifactsReferencesInStep(ctx, s); err != nil {
		return err
	}
	if s.Ref != nil {
		// A step that uses Ref may not also define the inline fields below.
		errs = errs.Also(s.Ref.Validate(ctx))
		if s.Image != "" {
			errs = errs.Also(&apis.FieldError{
				Message: "image cannot be used with Ref",
				Paths: []string{"image"},
			})
		}
		if len(s.Command) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: "command cannot be used with Ref",
				Paths: []string{"command"},
			})
		}
		if len(s.Args) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: "args cannot be used with Ref",
				Paths: []string{"args"},
			})
		}
		if s.Script != "" {
			errs = errs.Also(&apis.FieldError{
				Message: "script cannot be used with Ref",
				Paths: []string{"script"},
			})
		}
		if s.WorkingDir != "" {
			errs = errs.Also(&apis.FieldError{
				Message: "working dir cannot be used with Ref",
				Paths: []string{"workingDir"},
			})
		}
		if s.Env != nil {
			errs = errs.Also(&apis.FieldError{
				Message: "env cannot be used with Ref",
				Paths: []string{"env"},
			})
		}
		if len(s.VolumeMounts) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: "volumeMounts cannot be used with Ref",
				Paths: []string{"volumeMounts"},
			})
		}
		if len(s.Results) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: "results cannot be used with Ref",
				Paths: []string{"results"},
			})
		}
	} else {
		// An inline step may not use Ref-only fields and must specify an image.
		if len(s.Params) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: "params cannot be used without Ref",
				Paths: []string{"params"},
			})
		}
		if s.Image == "" {
			errs = errs.Also(apis.ErrMissingField("Image"))
		}
		if s.Script != "" {
			if len(s.Command) > 0 {
				errs = errs.Also(&apis.FieldError{
					Message: "script cannot be used with command",
					Paths: []string{"script"},
				})
			}
		}
	}
	if s.Name != "" {
		// Step names must be unique within the Task and be valid DNS labels.
		if names.Has(s.Name) {
			errs = errs.Also(apis.ErrInvalidValue(s.Name, "name"))
		}
		if e := validation.IsDNS1123Label(s.Name); len(e) > 0 {
			errs = errs.Also(&apis.FieldError{
				Message: fmt.Sprintf("invalid value %q", s.Name),
				Paths: []string{"name"},
				Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
			})
		}
		names.Insert(s.Name)
	}
	if s.Timeout != nil {
		if s.Timeout.Duration < time.Duration(0) {
			return apis.ErrInvalidValue(s.Timeout.Duration, "negative timeout")
		}
	}
	for j, vm := range s.VolumeMounts {
		// Paths under /tekton/ (except /tekton/home) and names prefixed with
		// "tekton-internal-" are reserved.
		if strings.HasPrefix(vm.MountPath, "/tekton/") &&
			!strings.HasPrefix(vm.MountPath, "/tekton/home") {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", vm.Name, vm.MountPath), "mountPath").ViaFieldIndex("volumeMounts", j))
		}
		if strings.HasPrefix(vm.Name, "tekton-internal-") {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf(`volumeMount name %q cannot start with "tekton-internal-"`, vm.Name), "name").ViaFieldIndex("volumeMounts", j))
		}
	}
	if s.OnError != "" {
		// onError may be a param reference (resolved later) or one of the two
		// literal values.
		if !isParamRefs(string(s.OnError)) && s.OnError != Continue && s.OnError != StopAndFail {
			errs = errs.Also(&apis.FieldError{
				Message: fmt.Sprintf("invalid value: \"%v\"", s.OnError),
				Paths: []string{"onError"},
				Details: "Task step onError must be either \"continue\" or \"stopAndFail\"",
			})
		}
	}
	if s.Script != "" {
		// Windows scripts (shebang "#!win") are gated behind the alpha API fields.
		cleaned := strings.TrimSpace(s.Script)
		if strings.HasPrefix(cleaned, "#!win") {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script"))
		}
	}
	// StdoutConfig is an alpha feature and will fail validation if it's used in a task spec
	// when the enable-api-fields feature gate is not "alpha".
	if s.StdoutConfig != nil {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "step stdout stream support", config.AlphaAPIFields).ViaField("stdoutconfig"))
	}
	// StderrConfig is an alpha feature and will fail validation if it's used in a task spec
	// when the enable-api-fields feature gate is not "alpha".
	if s.StderrConfig != nil {
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "step stderr stream support", config.AlphaAPIFields).ViaField("stderrconfig"))
	}
	// Validate usage of step result reference.
	// Referencing a previous step's results is only allowed in `env`, `command` and `args`.
	errs = errs.Also(validateStepResultReference(s))
	// Validate usage of step artifacts output reference.
	// Referencing a previous step's artifacts is only allowed in `env`, `command`, `args` and `script`.
	errs = errs.Also(validateStepArtifactsReference(s))
	return errs
}
// validateArtifactsReferencesInStep rejects any artifact reference in the step
// when the enable-artifacts feature flag is off.
func validateArtifactsReferencesInStep(ctx context.Context, s Step) *apis.FieldError {
	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
		return nil
	}
	// Fields in which an artifact reference could appear.
	candidates := make([]string, 0, 1+len(s.Command)+len(s.Args)+len(s.Env))
	candidates = append(candidates, s.Script)
	candidates = append(candidates, s.Command...)
	candidates = append(candidates, s.Args...)
	for _, envVar := range s.Env {
		candidates = append(candidates, envVar.Value)
	}
	for _, candidate := range candidates {
		if stepArtifactReferenceExists(candidate) || taskArtifactReferenceExists(candidate) {
			return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "")
		}
	}
	return nil
}
// ValidateParameterTypes validates all the types within a slice of ParamSpecs
func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) {
	for i := range params {
		errs = errs.Also(params[i].ValidateType(ctx))
	}
	return errs
}
// ValidateType checks that the type of a ParamSpec is allowed and its default value matches that type
func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError {
	// Ensure param has a valid type. Use slices.Contains instead of a manual
	// membership loop; the file already relies on the slices package.
	if !slices.Contains(AllParamTypes, p.Type) {
		return apis.ErrInvalidValue(p.Type, p.Name+".type")
	}
	// If a default value is provided, ensure its type matches param's declared type.
	if (p.Default != nil) && (p.Default.Type != p.Type) {
		return &apis.FieldError{
			Message: fmt.Sprintf(
				"\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type),
			Paths: []string{
				p.Name + ".type",
				p.Name + ".default.type",
			},
		}
	}
	// Check object type and its PropertySpec type
	return p.ValidateObjectType(ctx)
}
// ValidateObjectType checks that object type parameter does not miss the
// definition of `properties` section and the type of a PropertySpec is allowed.
// (Currently, only string is allowed)
func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError {
	var invalidKeys []string
	for key, spec := range p.Properties {
		if spec.Type != ParamTypeString {
			invalidKeys = append(invalidKeys, key)
		}
	}
	if len(invalidKeys) == 0 {
		return nil
	}
	return &apis.FieldError{
		Message: fmt.Sprintf("The value type specified for these keys %v is invalid", invalidKeys),
		Paths:   []string{p.Name + ".properties"},
	}
}
// ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps
func ValidateParameterVariables(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError {
	errs := params.validateNoDuplicateNames()
	errs = errs.Also(params.validateParamEnums(ctx).ViaField("params"))
	stringParams, arrayParams, objectParams := params.sortByType()
	arrayNames := sets.NewString(arrayParams.getNames()...)
	// String and array param names share a single name-format rule.
	stringAndArrayNames := sets.NewString(stringParams.getNames()...).Insert(arrayNames.List()...)
	errs = errs.Also(validateNameFormat(stringAndArrayNames, objectParams))
	return errs.Also(validateArrayUsage(steps, "params", arrayNames))
}
// validateTaskContextVariables returns an error if any Steps reference context variables that don't exist.
func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError {
	taskRunContextNames := sets.NewString("name", "namespace", "uid")
	taskContextNames := sets.NewString("name", "retry-count")
	errs := validateVariables(ctx, steps, "context\\.taskRun", taskRunContextNames)
	return errs.Also(validateVariables(ctx, steps, "context\\.task", taskContextNames))
}
// validateTaskResultsVariables validates if the results referenced in step script are defined in task results
func validateTaskResultsVariables(ctx context.Context, steps []Step, results []TaskResult) (errs *apis.FieldError) {
	declared := sets.NewString()
	for _, res := range results {
		declared.Insert(res.Name)
	}
	for i, step := range steps {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariablesWithDetail(step.Script, "results", declared).ViaField("script").ViaFieldIndex("steps", i))
	}
	return errs
}
// validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object
func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) (errs *apis.FieldError) {
	objectNames := sets.NewString()
	for _, param := range params {
		objectNames.Insert(param.Name)
		// Collect the declared keys so individual references like
		// params.<object>.<key> can be checked against them.
		keys := sets.NewString()
		for key := range param.Properties {
			keys.Insert(key)
		}
		errs = errs.Also(validateVariables(ctx, steps, "params\\."+param.Name, keys))
	}
	return errs.Also(validateObjectUsageAsWhole(steps, "params", objectNames))
}
// validateObjectUsageAsWhole returns an error if the Steps contain references to the entire input object params in fields where these references are prohibited
func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) {
	for idx, step := range steps {
		// Attach the steps[idx] path to this step's errors only. Hanging
		// ViaFieldIndex off the accumulated error (as before) re-prefixed
		// errors from earlier steps on every later iteration; see
		// validateVariables for the correct pattern.
		errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars).ViaFieldIndex("steps", idx))
	}
	return errs
}
// validateStepObjectUsageAsWhole returns an error if the Step contains references to the entire input object params in fields where these references are prohibited
func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError {
	var errs *apis.FieldError
	for _, f := range []struct{ value, field string }{
		{step.Name, "name"},
		{step.Image, "image"},
		{step.WorkingDir, "workingDir"},
		{step.Script, "script"},
	} {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(f.value, prefix, vars).ViaField(f.field))
	}
	for i, cmd := range step.Command {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(cmd, prefix, vars).ViaFieldIndex("command", i))
	}
	for i, arg := range step.Args {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(arg, prefix, vars).ViaFieldIndex("args", i))
	}
	for _, env := range step.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name))
	}
	for i, vm := range step.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(vm.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(vm.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(vm.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i))
	}
	return errs
}
// validateArrayUsage returns an error if the Steps contain references to the input array params in fields where these references are prohibited
func validateArrayUsage(steps []Step, prefix string, arrayParamNames sets.String) (errs *apis.FieldError) {
	for idx, step := range steps {
		// Scope the steps[idx] path to this step's errors only. Hanging
		// ViaFieldIndex off the accumulated error (as before) re-prefixed
		// errors from earlier steps on every later iteration; see
		// validateVariables for the correct pattern.
		errs = errs.Also(validateStepArrayUsage(step, prefix, arrayParamNames).ViaFieldIndex("steps", idx))
	}
	return errs
}
// validateStepArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited
func validateStepArrayUsage(step Step, prefix string, arrayParamNames sets.String) *apis.FieldError {
	var errs *apis.FieldError
	// Fields in which array references may never appear.
	for _, f := range []struct{ value, field string }{
		{step.Name, "name"},
		{step.Image, "image"},
		{step.WorkingDir, "workingDir"},
		{step.Script, "script"},
	} {
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(f.value, prefix, arrayParamNames).ViaField(f.field))
	}
	// command and args may reference arrays, but only as isolated expressions.
	for i, cmd := range step.Command {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(cmd, prefix, arrayParamNames).ViaFieldIndex("command", i))
	}
	for i, arg := range step.Args {
		errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(arg, prefix, arrayParamNames).ViaFieldIndex("args", i))
	}
	for _, env := range step.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(env.Value, prefix, arrayParamNames).ViaFieldKey("env", env.Name))
	}
	for i, vm := range step.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(vm.Name, prefix, arrayParamNames).ViaField("name").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(vm.MountPath, prefix, arrayParamNames).ViaField("mountPath").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(vm.SubPath, prefix, arrayParamNames).ViaField("subPath").ViaFieldIndex("volumeMount", i))
	}
	return errs
}
// validateVariables returns an error if the Steps contain references to any unknown variables
func validateVariables(ctx context.Context, steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) {
	// Param name format has already been checked; here we only verify that
	// each step references known names.
	for i := range steps {
		errs = errs.Also(validateStepVariables(ctx, steps[i], prefix, vars).ViaFieldIndex("steps", i))
	}
	return errs
}
// validateNameFormat validates that the name format of all param types follows the rules
func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSpec) (errs *apis.FieldError) {
	// String/array names: iterate the sorted list (not the map keys) so the
	// reported order is deterministic for unit tests.
	var badStringAndArrayNames []string
	for _, name := range stringAndArrayParams.List() {
		if !stringAndArrayVariableNameFormatRegex.MatchString(name) {
			badStringAndArrayNames = append(badStringAndArrayNames, name)
		}
	}
	if len(badStringAndArrayNames) != 0 {
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", badStringAndArrayNames),
			Paths:   []string{"params"},
			Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)",
		})
	}
	// Object params: both the param name and each property key must match the
	// (dot-free) object name format.
	badObjectNames := map[string][]string{}
	for _, obj := range objectParams {
		if !objectVariableNameFormatRegex.MatchString(obj.Name) {
			badObjectNames[obj.Name] = []string{}
		}
		for key := range obj.Properties {
			if !objectVariableNameFormatRegex.MatchString(key) {
				badObjectNames[obj.Name] = append(badObjectNames[obj.Name], key)
			}
		}
	}
	if len(badObjectNames) != 0 {
		errs = errs.Also(&apis.FieldError{
			Message: fmt.Sprintf("Object param name and key name format is invalid: %s", badObjectNames),
			Paths:   []string{"params"},
			Details: "Object Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)",
		})
	}
	return errs
}
// validateStepVariables returns an error if the Step contains references to any unknown variables
func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError {
	var errs *apis.FieldError
	for _, f := range []struct{ value, field string }{
		{step.Name, "name"},
		{step.Image, "image"},
		{step.WorkingDir, "workingDir"},
	} {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(f.value, prefix, vars).ViaField(f.field))
	}
	// Scripts use the variant that includes extra detail in the error.
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariablesWithDetail(step.Script, prefix, vars).ViaField("script"))
	for i, cmd := range step.Command {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i))
	}
	for i, arg := range step.Args {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(arg, prefix, vars).ViaFieldIndex("args", i))
	}
	for _, env := range step.Env {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name))
	}
	for i, vm := range step.VolumeMounts {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(vm.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(vm.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i))
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(vm.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i))
	}
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(string(step.OnError), prefix, vars).ViaField("onError"))
	return errs
}
// isParamRefs attempts to check if a specified string looks like it contains any parameter reference
// This is useful to make sure the specified value looks like a Parameter Reference before performing any strict validation
func isParamRefs(s string) bool {
	prefix := "$(" + ParamsPrefix
	return strings.HasPrefix(s, prefix)
}
// GetIndexingReferencesToArrayParams returns all strings referencing indices of TaskRun array parameters
// from parameters, workspaces, and when expressions defined in the Task.
// For example, if a Task has a parameter with a value "$(params.array-param-name[1])",
// this would be one of the strings returned.
func (ts *TaskSpec) GetIndexingReferencesToArrayParams() sets.String {
	// Gather every string in the spec that may contain a param reference.
	var refs []string
	refs = append(refs, extractParamRefsFromSteps(ts.Steps)...)
	refs = append(refs, extractParamRefsFromStepTemplate(ts.StepTemplate)...)
	refs = append(refs, extractParamRefsFromVolumes(ts.Volumes)...)
	for _, ws := range ts.Workspaces {
		refs = append(refs, ws.MountPath)
	}
	refs = append(refs, extractParamRefsFromSidecars(ts.Sidecars)...)
	// Keep only array-indexing references such as "$(params.foo[1])".
	result := sets.NewString()
	for _, ref := range refs {
		result.Insert(extractArrayIndexingParamRefs(ref)...)
	}
	return result
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo copies this v1beta1 TaskRef into the equivalent v1 TaskRef sink.
func (tr TaskRef) convertTo(ctx context.Context, sink *v1.TaskRef) {
	sink.Name = tr.Name
	sink.Kind = v1.TaskKind(tr.Kind)
	sink.APIVersion = tr.APIVersion
	// Named resolverRef so it does not shadow the predeclared identifier `new`.
	resolverRef := v1.ResolverRef{}
	tr.ResolverRef.convertTo(ctx, &resolverRef)
	sink.ResolverRef = resolverRef
}
// ConvertFrom converts v1beta1 TaskRef from v1 TaskRef
func (tr *TaskRef) ConvertFrom(ctx context.Context, source v1.TaskRef) {
	tr.Name = source.Name
	tr.Kind = TaskKind(source.Kind)
	tr.APIVersion = source.APIVersion
	// Named resolverRef so it does not shadow the predeclared identifier `new`.
	resolverRef := ResolverRef{}
	resolverRef.convertFrom(ctx, source.ResolverRef)
	tr.ResolverRef = resolverRef
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
// TaskRef can be used to refer to a specific instance of a task.
type TaskRef struct {
	// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
	Name string `json:"name,omitempty"`
	// TaskKind indicates the Kind of the Task:
	// 1. Namespaced Task when Kind is set to "Task". If Kind is "", it defaults to "Task".
	// 2. Custom Task when Kind is non-empty and APIVersion is non-empty
	Kind TaskKind `json:"kind,omitempty"`
	// API version of the referent
	// Note: A Task with non-empty APIVersion and Kind is considered a Custom Task
	// +optional
	APIVersion string `json:"apiVersion,omitempty"`
	// Bundle url reference to a Tekton Bundle.
	//
	// Deprecated: Please use ResolverRef with the bundles resolver instead.
	// The field is staying there for go client backward compatibility, but is not used/allowed anymore.
	// +optional
	Bundle string `json:"bundle,omitempty"`
	// ResolverRef allows referencing a Task in a remote location
	// like a git repo. This field is only supported when the alpha
	// feature gate is enabled.
	// The embedded struct's fields are inlined into this object's JSON form.
	// +optional
	ResolverRef `json:",omitempty"`
}
// TaskKind defines the type of Task used by the pipeline.
type TaskKind string

const (
	// NamespacedTaskKind indicates that the task type has a namespaced scope.
	NamespacedTaskKind TaskKind = "Task"
)
// IsCustomTask checks whether the reference is to a Custom Task.
// A nil receiver is never a Custom Task.
func (tr *TaskRef) IsCustomTask() bool {
	if tr == nil {
		return false
	}
	// Note that if `apiVersion` is set to `"tekton.dev/v1beta1"` and `kind` is set to `"Task"`,
	// the reference will be considered a Custom Task - https://github.com/tektoncd/pipeline/issues/6457
	return tr.APIVersion != "" && tr.Kind != ""
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
// Validate ensures that a supplied TaskRef field is populated
// correctly. No errors are returned for a nil TaskRef.
func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) {
	if ref == nil {
		return errs
	}
	// The deprecated bundle field may survive on pre-existing objects but
	// must not be set on newly created ones.
	if apis.IsInCreate(ctx) && ref.Bundle != "" {
		errs = errs.Also(apis.ErrDisallowedFields("bundle"))
	}
	switch {
	// Remote resolution: either resolver or params (or both) are present.
	case ref.Resolver != "" || ref.Params != nil:
		if ref.Params != nil {
			// Resolver params are a beta API field and require a resolver.
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
			if ref.Name != "" {
				errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
			}
			if ref.Resolver == "" {
				errs = errs.Also(apis.ErrMissingField("resolver"))
			}
			errs = errs.Also(ValidateParameters(ctx, ref.Params))
		}
		if ref.Resolver != "" {
			errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
			if ref.Name != "" {
				// make sure that the name is url-like.
				err := RefNameLikeUrl(ref.Name)
				if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
					// If name is url-like then concise resolver syntax must be enabled
					errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
				}
				if err != nil {
					errs = errs.Also(apis.ErrInvalidValue(err, "name"))
				}
			}
		}
	// Plain reference by name (no resolver, no params).
	case ref.Name != "":
		// ref name can be a Url-like format.
		if err := RefNameLikeUrl(ref.Name); err == nil {
			// If name is url-like then concise resolver syntax must be enabled
			if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
			}
			// In stage1 of concise remote resolvers syntax, this is a required field.
			// TODO: remove this check when implementing stage 2 where this is optional.
			if ref.Resolver == "" {
				errs = errs.Also(apis.ErrMissingField("resolver"))
			}
			// Or, it must be a valid k8s name
		} else {
			// ref name must be a valid k8s name
			if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
				errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
			}
		}
	default:
		errs = errs.Also(apis.ErrMissingField("name"))
	}
	return //nolint:nakedret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
)
const (
	// Annotation keys used to stash deprecated v1beta1-only fields on a v1
	// object's metadata so the conversion round-trips without data loss
	// (see the serialize*/deserialize* helpers below).
	cloudEventsAnnotationKey     = "tekton.dev/v1beta1CloudEvents"
	resourcesResultAnnotationKey = "tekton.dev/v1beta1ResourcesResult"
	resourcesStatusAnnotationKey = "tekton.dev/v1beta1ResourcesStatus"
)

// Compile-time check that TaskRun implements apis.Convertible.
var _ apis.Convertible = (*TaskRun)(nil)
// ConvertTo implements apis.Convertible
func (tr *TaskRun) ConvertTo(ctx context.Context, to apis.Convertible) error {
	// Deletions never need conversion.
	if apis.IsInDelete(ctx) {
		return nil
	}
	switch sink := to.(type) {
	case *v1.TaskRun:
		sink.ObjectMeta = tr.ObjectMeta
		// Stash the deprecated v1beta1-only fields in annotations so a
		// later ConvertFrom can restore them.
		for _, stash := range []func() error{
			func() error { return serializeTaskRunResources(&sink.ObjectMeta, &tr.Spec) },
			func() error { return serializeTaskRunCloudEvents(&sink.ObjectMeta, &tr.Status) },
			func() error { return serializeTaskRunResourcesResult(&sink.ObjectMeta, &tr.Status) },
			func() error { return serializeTaskRunResourcesStatus(&sink.ObjectMeta, &tr.Status) },
		} {
			if err := stash(); err != nil {
				return err
			}
		}
		if err := tr.Status.ConvertTo(ctx, &sink.Status, &sink.ObjectMeta); err != nil {
			return err
		}
		return tr.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta)
	default:
		return fmt.Errorf("unknown version, got: %T", sink)
	}
}
// ConvertTo implements apis.Convertible
func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec, meta *metav1.ObjectMeta) error {
	if trs.Debug != nil {
		sink.Debug = &v1.TaskRunDebug{}
		trs.Debug.convertTo(ctx, sink.Debug)
	}
	sink.Params = nil
	for _, p := range trs.Params {
		var param v1.Param
		p.convertTo(ctx, &param)
		sink.Params = append(sink.Params, param)
	}
	sink.ServiceAccountName = trs.ServiceAccountName
	if trs.TaskRef != nil {
		sink.TaskRef = &v1.TaskRef{}
		trs.TaskRef.convertTo(ctx, sink.TaskRef)
	}
	if trs.TaskSpec != nil {
		sink.TaskSpec = &v1.TaskSpec{}
		if err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec, meta, meta.Name); err != nil {
			return err
		}
	}
	sink.Status = v1.TaskRunSpecStatus(trs.Status)
	sink.StatusMessage = v1.TaskRunSpecStatusMessage(trs.StatusMessage)
	sink.Retries = trs.Retries
	sink.Timeout = trs.Timeout
	sink.PodTemplate = trs.PodTemplate
	sink.Workspaces = nil
	for _, w := range trs.Workspaces {
		var binding v1.WorkspaceBinding
		w.convertTo(ctx, &binding)
		sink.Workspaces = append(sink.Workspaces, binding)
	}
	// v1beta1 StepOverrides/SidecarOverrides were renamed to
	// StepSpecs/SidecarSpecs in v1.
	sink.StepSpecs = nil
	for _, so := range trs.StepOverrides {
		var stepSpec v1.TaskRunStepSpec
		so.convertTo(ctx, &stepSpec)
		sink.StepSpecs = append(sink.StepSpecs, stepSpec)
	}
	sink.SidecarSpecs = nil
	for _, so := range trs.SidecarOverrides {
		var sidecarSpec v1.TaskRunSidecarSpec
		so.convertTo(ctx, &sidecarSpec)
		sink.SidecarSpecs = append(sink.SidecarSpecs, sidecarSpec)
	}
	sink.ComputeResources = trs.ComputeResources
	return nil
}
// ConvertFrom implements apis.Convertible
func (tr *TaskRun) ConvertFrom(ctx context.Context, from apis.Convertible) error {
	// Deletions never need conversion.
	if apis.IsInDelete(ctx) {
		return nil
	}
	switch source := from.(type) {
	case *v1.TaskRun:
		tr.ObjectMeta = source.ObjectMeta
		// Restore deprecated v1beta1-only fields previously stashed in
		// annotations by ConvertTo.
		if err := deserializeTaskRunResources(&tr.ObjectMeta, &tr.Spec); err != nil {
			return err
		}
		if err := deserializeTaskRunCloudEvents(&tr.ObjectMeta, &tr.Status); err != nil {
			return err
		}
		if err := deserializeTaskRunResourcesResult(&tr.ObjectMeta, &tr.Status); err != nil {
			return err
		}
		if err := tr.Status.ConvertFrom(ctx, source.Status, &tr.ObjectMeta); err != nil {
			return err
		}
		if err := deserializeTaskRunResourcesStatus(&tr.ObjectMeta, &tr.Status); err != nil {
			return err
		}
		return tr.Spec.ConvertFrom(ctx, &source.Spec, &tr.ObjectMeta)
	default:
		// Bug fix: report the type of the unknown source, not the receiver.
		// Previously %T was given tr, so the message always printed the
		// receiver's own type instead of the offending input type
		// (ConvertTo above correctly reports the sink).
		return fmt.Errorf("unknown version, got: %T", source)
	}
}
// ConvertFrom implements apis.Convertible
func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec, meta *metav1.ObjectMeta) error {
	if source.Debug != nil {
		debug := TaskRunDebug{}
		debug.convertFrom(ctx, *source.Debug)
		trs.Debug = &debug
	}
	trs.Params = nil
	for _, p := range source.Params {
		var param Param
		param.ConvertFrom(ctx, p)
		trs.Params = append(trs.Params, param)
	}
	trs.ServiceAccountName = source.ServiceAccountName
	if source.TaskRef != nil {
		ref := TaskRef{}
		ref.ConvertFrom(ctx, *source.TaskRef)
		trs.TaskRef = &ref
	}
	if source.TaskSpec != nil {
		taskSpec := TaskSpec{}
		if err := taskSpec.ConvertFrom(ctx, source.TaskSpec, meta, meta.Name); err != nil {
			return err
		}
		trs.TaskSpec = &taskSpec
	}
	trs.Status = TaskRunSpecStatus(source.Status)
	trs.StatusMessage = TaskRunSpecStatusMessage(source.StatusMessage)
	trs.Retries = source.Retries
	trs.Timeout = source.Timeout
	trs.PodTemplate = source.PodTemplate
	trs.Workspaces = nil
	for _, w := range source.Workspaces {
		var binding WorkspaceBinding
		binding.ConvertFrom(ctx, w)
		trs.Workspaces = append(trs.Workspaces, binding)
	}
	// v1 StepSpecs/SidecarSpecs map back to the v1beta1
	// StepOverrides/SidecarOverrides names.
	trs.StepOverrides = nil
	for _, stepSpec := range source.StepSpecs {
		var override TaskRunStepOverride
		override.convertFrom(ctx, stepSpec)
		trs.StepOverrides = append(trs.StepOverrides, override)
	}
	trs.SidecarOverrides = nil
	for _, sidecarSpec := range source.SidecarSpecs {
		var override TaskRunSidecarOverride
		override.convertFrom(ctx, sidecarSpec)
		trs.SidecarOverrides = append(trs.SidecarOverrides, override)
	}
	trs.ComputeResources = source.ComputeResources
	return nil
}
// convertTo copies the debug breakpoint configuration into the v1 sink.
func (trd TaskRunDebug) convertTo(ctx context.Context, sink *v1.TaskRunDebug) {
	if trd.Breakpoints == nil {
		return
	}
	sink.Breakpoints = &v1.TaskBreakpoints{}
	trd.Breakpoints.convertTo(ctx, sink.Breakpoints)
}
// convertFrom copies the v1 debug breakpoint configuration into the receiver.
func (trd *TaskRunDebug) convertFrom(ctx context.Context, source v1.TaskRunDebug) {
	if source.Breakpoints == nil {
		return
	}
	breakpoints := TaskBreakpoints{}
	breakpoints.convertFrom(ctx, *source.Breakpoints)
	trd.Breakpoints = &breakpoints
}
// convertTo copies breakpoint settings into the v1 sink; BeforeSteps is
// only populated when non-empty so an empty source stays nil.
func (tbp TaskBreakpoints) convertTo(ctx context.Context, sink *v1.TaskBreakpoints) {
	sink.OnFailure = tbp.OnFailure
	if len(tbp.BeforeSteps) == 0 {
		return
	}
	sink.BeforeSteps = make([]string, len(tbp.BeforeSteps))
	copy(sink.BeforeSteps, tbp.BeforeSteps)
}
// convertFrom copies v1 breakpoint settings into the receiver; BeforeSteps
// is only populated when non-empty so an empty source stays nil.
func (tbp *TaskBreakpoints) convertFrom(ctx context.Context, source v1.TaskBreakpoints) {
	tbp.OnFailure = source.OnFailure
	if len(source.BeforeSteps) == 0 {
		return
	}
	tbp.BeforeSteps = make([]string, len(source.BeforeSteps))
	copy(tbp.BeforeSteps, source.BeforeSteps)
}
// convertTo maps a v1beta1 step override onto the v1 step spec, where
// Resources was renamed to ComputeResources.
func (trso TaskRunStepOverride) convertTo(ctx context.Context, sink *v1.TaskRunStepSpec) {
	sink.ComputeResources = trso.Resources
	sink.Name = trso.Name
}
// convertFrom maps a v1 step spec back onto the v1beta1 override, where
// ComputeResources was named Resources.
func (trso *TaskRunStepOverride) convertFrom(ctx context.Context, source v1.TaskRunStepSpec) {
	trso.Resources = source.ComputeResources
	trso.Name = source.Name
}
// convertTo maps a v1beta1 sidecar override onto the v1 sidecar spec, where
// Resources was renamed to ComputeResources.
func (trso TaskRunSidecarOverride) convertTo(ctx context.Context, sink *v1.TaskRunSidecarSpec) {
	sink.ComputeResources = trso.Resources
	sink.Name = trso.Name
}
// convertFrom maps a v1 sidecar spec back onto the v1beta1 override, where
// ComputeResources was named Resources.
func (trso *TaskRunSidecarOverride) convertFrom(ctx context.Context, source v1.TaskRunSidecarSpec) {
	trso.Resources = source.ComputeResources
	trso.Name = source.Name
}
// ConvertTo implements apis.Convertible
func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus, meta *metav1.ObjectMeta) error {
	sink.Status = trs.Status
	sink.PodName = trs.PodName
	sink.StartTime = trs.StartTime
	sink.CompletionTime = trs.CompletionTime
	// Slices are reset to nil first so an empty source yields a nil (not
	// empty) slice in the sink.
	sink.Steps = nil
	for _, state := range trs.Steps {
		var step v1.StepState
		state.convertTo(ctx, &step)
		sink.Steps = append(sink.Steps, step)
	}
	sink.RetriesStatus = nil
	for _, retry := range trs.RetriesStatus {
		var status v1.TaskRunStatus
		if err := retry.ConvertTo(ctx, &status, meta); err != nil {
			return err
		}
		sink.RetriesStatus = append(sink.RetriesStatus, status)
	}
	// v1beta1 TaskRunResults became Results in v1.
	sink.Results = nil
	for _, trResult := range trs.TaskRunResults {
		var result v1.TaskRunResult
		trResult.convertTo(ctx, &result)
		sink.Results = append(sink.Results, result)
	}
	sink.Sidecars = nil
	for _, state := range trs.Sidecars {
		var sidecar v1.SidecarState
		state.convertTo(ctx, &sidecar)
		sink.Sidecars = append(sink.Sidecars, sidecar)
	}
	if trs.TaskSpec != nil {
		sink.TaskSpec = &v1.TaskSpec{}
		if err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec, meta, meta.Name); err != nil {
			return err
		}
	}
	if trs.Provenance != nil {
		provenance := v1.Provenance{}
		trs.Provenance.convertTo(ctx, &provenance)
		sink.Provenance = &provenance
	}
	return nil
}
// ConvertFrom implements apis.Convertible
func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStatus, meta *metav1.ObjectMeta) error {
	trs.Status = source.Status
	trs.PodName = source.PodName
	trs.StartTime = source.StartTime
	trs.CompletionTime = source.CompletionTime
	// Slices are reset to nil first so an empty source yields a nil (not
	// empty) slice in the receiver.
	trs.Steps = nil
	for _, state := range source.Steps {
		var step StepState
		step.convertFrom(ctx, state)
		trs.Steps = append(trs.Steps, step)
	}
	trs.RetriesStatus = nil
	for _, retry := range source.RetriesStatus {
		var status TaskRunStatus
		if err := status.ConvertFrom(ctx, retry, meta); err != nil {
			return err
		}
		trs.RetriesStatus = append(trs.RetriesStatus, status)
	}
	// v1 Results map back to the v1beta1 TaskRunResults name.
	trs.TaskRunResults = nil
	for _, result := range source.Results {
		var trResult TaskRunResult
		trResult.convertFrom(ctx, result)
		trs.TaskRunResults = append(trs.TaskRunResults, trResult)
	}
	trs.Sidecars = nil
	for _, state := range source.Sidecars {
		var sidecar SidecarState
		sidecar.convertFrom(ctx, state)
		trs.Sidecars = append(trs.Sidecars, sidecar)
	}
	if source.TaskSpec != nil {
		trs.TaskSpec = &TaskSpec{}
		if err := trs.TaskSpec.ConvertFrom(ctx, source.TaskSpec, meta, meta.Name); err != nil {
			return err
		}
	}
	if source.Provenance != nil {
		provenance := Provenance{}
		provenance.convertFrom(ctx, *source.Provenance)
		trs.Provenance = &provenance
	}
	return nil
}
// convertTo copies a v1beta1 step state into its v1 shape
// (ContainerName maps to Container).
func (ss StepState) convertTo(ctx context.Context, sink *v1.StepState) {
	sink.ContainerState = ss.ContainerState
	sink.Name = ss.Name
	sink.Container = ss.ContainerName
	sink.ImageID = ss.ImageID
	sink.Results = nil
	if ss.Provenance != nil {
		provenance := v1.Provenance{}
		ss.Provenance.convertTo(ctx, &provenance)
		sink.Provenance = &provenance
	}
	// v1 surfaces the terminated reason in a dedicated field.
	if terminated := ss.ContainerState.Terminated; terminated != nil {
		sink.TerminationReason = terminated.Reason
	}
	for _, artifact := range ss.Outputs {
		var output v1.TaskRunStepArtifact
		artifact.convertTo(ctx, &output)
		sink.Outputs = append(sink.Outputs, output)
	}
	for _, artifact := range ss.Inputs {
		var input v1.TaskRunStepArtifact
		artifact.convertTo(ctx, &input)
		sink.Inputs = append(sink.Inputs, input)
	}
	for _, result := range ss.Results {
		var stepResult v1.TaskRunStepResult
		result.convertTo(ctx, &stepResult)
		sink.Results = append(sink.Results, stepResult)
	}
}
// convertFrom copies a v1 step state into its v1beta1 shape
// (Container maps back to ContainerName).
func (ss *StepState) convertFrom(ctx context.Context, source v1.StepState) {
	ss.ContainerState = source.ContainerState
	ss.Name = source.Name
	ss.ContainerName = source.Container
	ss.ImageID = source.ImageID
	ss.Results = nil
	for _, result := range source.Results {
		var stepResult TaskRunStepResult
		stepResult.convertFrom(ctx, result)
		ss.Results = append(ss.Results, stepResult)
	}
	if source.Provenance != nil {
		provenance := Provenance{}
		provenance.convertFrom(ctx, *source.Provenance)
		ss.Provenance = &provenance
	}
	for _, artifact := range source.Outputs {
		var output TaskRunStepArtifact
		output.convertFrom(ctx, artifact)
		ss.Outputs = append(ss.Outputs, output)
	}
	for _, artifact := range source.Inputs {
		var input TaskRunStepArtifact
		input.convertFrom(ctx, artifact)
		ss.Inputs = append(ss.Inputs, input)
	}
}
// convertTo copies a v1beta1 task run result into the v1 sink.
func (trr TaskRunResult) convertTo(ctx context.Context, sink *v1.TaskRunResult) {
	sink.Name = trr.Name
	sink.Type = v1.ResultsType(trr.Type)
	var value v1.ParamValue
	trr.Value.convertTo(ctx, &value)
	sink.Value = value
}
// convertFrom copies a v1 task run result into the receiver.
func (trr *TaskRunResult) convertFrom(ctx context.Context, source v1.TaskRunResult) {
	trr.Name = source.Name
	trr.Type = ResultsType(source.Type)
	var value ParamValue
	value.convertFrom(ctx, source.Value)
	trr.Value = value
}
// convertFrom copies a v1 step artifact into the receiver.
func (t *TaskRunStepArtifact) convertFrom(ctx context.Context, source v1.TaskRunStepArtifact) {
	t.Name = source.Name
	for _, value := range source.Values {
		var converted ArtifactValue
		converted.convertFrom(ctx, value)
		t.Values = append(t.Values, converted)
	}
}
// convertTo copies a v1beta1 step artifact into the v1 sink.
func (t TaskRunStepArtifact) convertTo(ctx context.Context, sink *v1.TaskRunStepArtifact) {
	sink.Name = t.Name
	for _, value := range t.Values {
		var converted v1.ArtifactValue
		value.convertTo(ctx, &converted)
		sink.Values = append(sink.Values, converted)
	}
}
// convertFrom copies a v1 artifact value into the receiver; a nil Digest
// map stays nil.
func (t *ArtifactValue) convertFrom(ctx context.Context, source v1.ArtifactValue) {
	t.Uri = source.Uri
	if source.Digest == nil {
		return
	}
	t.Digest = make(map[Algorithm]string, len(source.Digest))
	for alg, digest := range source.Digest {
		t.Digest[Algorithm(alg)] = digest
	}
}
// convertTo copies a v1beta1 artifact value into the v1 sink; a nil Digest
// map stays nil.
func (t ArtifactValue) convertTo(ctx context.Context, sink *v1.ArtifactValue) {
	sink.Uri = t.Uri
	if t.Digest == nil {
		return
	}
	sink.Digest = make(map[v1.Algorithm]string, len(t.Digest))
	for alg, digest := range t.Digest {
		sink.Digest[v1.Algorithm(alg)] = digest
	}
}
// convertTo copies a v1beta1 sidecar state into its v1 shape
// (ContainerName maps to Container).
func (ss SidecarState) convertTo(ctx context.Context, sink *v1.SidecarState) {
	sink.Name = ss.Name
	sink.Container = ss.ContainerName
	sink.ImageID = ss.ImageID
	sink.ContainerState = ss.ContainerState
}
// convertFrom copies a v1 sidecar state into its v1beta1 shape
// (Container maps back to ContainerName).
func (ss *SidecarState) convertFrom(ctx context.Context, source v1.SidecarState) {
	ss.Name = source.Name
	ss.ContainerName = source.Container
	ss.ImageID = source.ImageID
	ss.ContainerState = source.ContainerState
}
// serializeTaskRunResources stashes the deprecated Spec.Resources field in
// an object-metadata annotation; a nil field is a no-op.
func serializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error {
	if spec.Resources != nil {
		return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey)
	}
	return nil
}
// deserializeTaskRunResources restores the deprecated Spec.Resources field
// from object metadata; the field is only set when the stashed value has
// inputs or outputs.
func deserializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error {
	var resources TaskRunResources
	if err := version.DeserializeFromMetadata(meta, &resources, resourcesAnnotationKey); err != nil {
		return err
	}
	if resources.Inputs != nil || resources.Outputs != nil {
		spec.Resources = &resources
	}
	return nil
}
// serializeTaskRunCloudEvents stashes the deprecated Status.CloudEvents
// field in an object-metadata annotation; a nil field is a no-op.
func serializeTaskRunCloudEvents(meta *metav1.ObjectMeta, status *TaskRunStatus) error {
	if status.CloudEvents != nil {
		return version.SerializeToMetadata(meta, status.CloudEvents, cloudEventsAnnotationKey)
	}
	return nil
}
// deserializeTaskRunCloudEvents restores the deprecated Status.CloudEvents
// field from object metadata; the field is only set when the stashed list
// is non-empty.
func deserializeTaskRunCloudEvents(meta *metav1.ObjectMeta, status *TaskRunStatus) error {
	cloudEvents := make([]CloudEventDelivery, 0)
	if err := version.DeserializeFromMetadata(meta, &cloudEvents, cloudEventsAnnotationKey); err != nil {
		return err
	}
	if len(cloudEvents) > 0 {
		status.CloudEvents = cloudEvents
	}
	return nil
}
// serializeTaskRunResourcesResult stashes the deprecated
// Status.ResourcesResult field in an object-metadata annotation; a nil
// field is a no-op.
func serializeTaskRunResourcesResult(meta *metav1.ObjectMeta, status *TaskRunStatus) error {
	if status.ResourcesResult != nil {
		return version.SerializeToMetadata(meta, status.ResourcesResult, resourcesResultAnnotationKey)
	}
	return nil
}
// deserializeTaskRunResourcesResult restores the deprecated
// Status.ResourcesResult field from object metadata; the field is only set
// when the stashed list is non-empty.
func deserializeTaskRunResourcesResult(meta *metav1.ObjectMeta, status *TaskRunStatus) error {
	resourcesResult := make([]RunResult, 0)
	if err := version.DeserializeFromMetadata(meta, &resourcesResult, resourcesResultAnnotationKey); err != nil {
		return err
	}
	if len(resourcesResult) > 0 {
		status.ResourcesResult = resourcesResult
	}
	return nil
}
// serializeTaskRunResourcesStatus stashes the deprecated
// Status.TaskSpec.Resources field in an object-metadata annotation; it is
// a no-op when there is no task spec or no resources.
func serializeTaskRunResourcesStatus(meta *metav1.ObjectMeta, status *TaskRunStatus) error {
	if status.TaskSpec == nil || status.TaskSpec.Resources == nil {
		return nil
	}
	return version.SerializeToMetadata(meta, status.TaskSpec.Resources, resourcesStatusAnnotationKey)
}
// deserializeTaskRunResourcesStatus restores the deprecated
// Status.TaskSpec.Resources field from object metadata, creating the
// status TaskSpec on demand; nothing is set when the stashed value has
// neither inputs nor outputs.
func deserializeTaskRunResourcesStatus(meta *metav1.ObjectMeta, status *TaskRunStatus) error {
	resourcesStatus := &TaskResources{}
	if err := version.DeserializeFromMetadata(meta, resourcesStatus, resourcesStatusAnnotationKey); err != nil {
		return err
	}
	if resourcesStatus.Inputs == nil && resourcesStatus.Outputs == nil {
		return nil
	}
	if status.TaskRunStatusFields.TaskSpec == nil {
		status.TaskSpec = &TaskSpec{}
	}
	status.TaskSpec.Resources = resourcesStatus
	return nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmap"
)
// Compile-time check that TaskRun implements apis.Defaultable.
var _ apis.Defaultable = (*TaskRun)(nil)

// ManagedByLabelKey is the label key used to mark what is managing this resource
const ManagedByLabelKey = "app.kubernetes.io/managed-by"
// SetDefaults implements apis.Defaultable
func (tr *TaskRun) SetDefaults(ctx context.Context) {
	ctx = apis.WithinParent(ctx, tr.ObjectMeta)
	tr.Spec.SetDefaults(ctx)
	// Silently filtering out Tekton Reserved annotations at creation
	if apis.IsInCreate(ctx) {
		tr.ObjectMeta.Annotations = kmap.Filter(tr.ObjectMeta.Annotations, filterReservedAnnotationRegexp.MatchString)
	}
	// If the TaskRun doesn't have a managed-by label, apply the default
	// specified in the config.
	cfg := config.FromContextOrDefaults(ctx)
	if tr.ObjectMeta.Labels == nil {
		tr.ObjectMeta.Labels = make(map[string]string)
	}
	if _, found := tr.ObjectMeta.Labels[ManagedByLabelKey]; !found {
		tr.ObjectMeta.Labels[ManagedByLabelKey] = cfg.Defaults.DefaultManagedByLabelValue
	}
}
// SetDefaults implements apis.Defaultable
func (trs *TaskRunSpec) SetDefaults(ctx context.Context) {
	cfg := config.FromContextOrDefaults(ctx)
	if ref := trs.TaskRef; ref != nil {
		// A nameless ref falls back to the cluster's default resolver;
		// the Kind default only applies when no resolver is in play
		// (including one just set above).
		if ref.Name == "" && ref.Resolver == "" {
			ref.Resolver = ResolverName(cfg.Defaults.DefaultResolverType)
		}
		if ref.Kind == "" && ref.Resolver == "" {
			ref.Kind = NamespacedTaskKind
		}
	}
	if trs.Timeout == nil {
		trs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute}
	}
	if defaultSA := cfg.Defaults.DefaultServiceAccount; trs.ServiceAccountName == "" && defaultSA != "" {
		trs.ServiceAccountName = defaultSA
	}
	trs.PodTemplate = pod.MergePodTemplateWithDefault(trs.PodTemplate, cfg.Defaults.DefaultPodTemplate)
	// If this taskrun has an embedded task, apply the usual task defaults
	if trs.TaskSpec != nil {
		trs.TaskSpec.SetDefaults(ctx)
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
apisconfig "github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// TaskRunSpec defines the desired state of TaskRun
type TaskRunSpec struct {
	// Debug holds breakpoint configuration for this run.
	// +optional
	Debug *TaskRunDebug `json:"debug,omitempty"`
	// Params are the input parameters passed to the referenced/embedded Task.
	// +optional
	Params Params `json:"params,omitempty"`
	// Deprecated: Unused, preserved only for backwards compatibility
	// +optional
	Resources *TaskRunResources `json:"resources,omitempty"`
	// ServiceAccountName is the service account under which the TaskRun's
	// pod executes; defaulted from cluster config when empty (see SetDefaults).
	// +optional
	ServiceAccountName string `json:"serviceAccountName"`
	// no more than one of the TaskRef and TaskSpec may be specified.
	// +optional
	TaskRef *TaskRef `json:"taskRef,omitempty"`
	// Specifying TaskSpec can be disabled by setting
	// `disable-inline-spec` feature flag.
	// See Task.spec (API version: tekton.dev/v1beta1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	TaskSpec *TaskSpec `json:"taskSpec,omitempty"`
	// Used for cancelling a TaskRun (and maybe more later on)
	// +optional
	Status TaskRunSpecStatus `json:"status,omitempty"`
	// Status message for cancellation.
	// +optional
	StatusMessage TaskRunSpecStatusMessage `json:"statusMessage,omitempty"`
	// Retries represents how many times this TaskRun should be retried in the event of Task failure.
	// +optional
	Retries int `json:"retries,omitempty"`
	// Time after which one retry attempt times out. Defaults to 1 hour.
	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`
	// PodTemplate holds pod specific configuration
	PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"`
	// Workspaces is a list of WorkspaceBindings from volumes to workspaces.
	// +optional
	// +listType=atomic
	Workspaces []WorkspaceBinding `json:"workspaces,omitempty"`
	// Overrides to apply to Steps in this TaskRun.
	// If a field is specified in both a Step and a StepOverride,
	// the value from the StepOverride will be used.
	// This field is only supported when the alpha feature gate is enabled.
	// +optional
	// +listType=atomic
	StepOverrides []TaskRunStepOverride `json:"stepOverrides,omitempty"`
	// Overrides to apply to Sidecars in this TaskRun.
	// If a field is specified in both a Sidecar and a SidecarOverride,
	// the value from the SidecarOverride will be used.
	// This field is only supported when the alpha feature gate is enabled.
	// +optional
	// +listType=atomic
	SidecarOverrides []TaskRunSidecarOverride `json:"sidecarOverrides,omitempty"`
	// Compute resources to use for this TaskRun
	ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"`
	// ManagedBy indicates which controller is responsible for reconciling
	// this resource. If unset or set to "tekton.dev/pipeline", the default
	// Tekton controller will manage this resource.
	// This field is immutable.
	// +optional
	ManagedBy *string `json:"managedBy,omitempty"`
}
// TaskRunSpecStatus defines the TaskRun spec status the user can provide
type TaskRunSpecStatus string

const (
	// TaskRunSpecStatusCancelled indicates that the user wants to cancel the task,
	// if not already cancelled or terminated
	TaskRunSpecStatusCancelled = "TaskRunCancelled"
)
// TaskRunSpecStatusMessage defines human readable status messages for the TaskRun.
type TaskRunSpecStatusMessage string

const (
	// TaskRunCancelledByPipelineMsg indicates that the PipelineRun of which this
	// TaskRun was a part of has been cancelled.
	TaskRunCancelledByPipelineMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has been cancelled."
	// TaskRunCancelledByPipelineTimeoutMsg indicates that the TaskRun was cancelled because the PipelineRun running it timed out.
	TaskRunCancelledByPipelineTimeoutMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has timed out."
)
const (
	// EnabledOnFailureBreakpoint is the value for TaskRunDebug.Breakpoints.OnFailure that means the breakpoint onFailure is enabled
	EnabledOnFailureBreakpoint = "enabled"
)
// TaskRunDebug defines the breakpoint config for a particular TaskRun
type TaskRunDebug struct {
	// Breakpoints, when set, describes where execution should pause
	// (on failure and/or before named steps).
	// +optional
	Breakpoints *TaskBreakpoints `json:"breakpoints,omitempty"`
}
// TaskBreakpoints defines the breakpoint config for a particular Task
type TaskBreakpoints struct {
	// if enabled, pause TaskRun on failure of a step
	// failed step will not exit
	// +optional
	OnFailure string `json:"onFailure,omitempty"`
	// BeforeSteps lists step names before which execution should pause.
	// +optional
	// +listType=atomic
	BeforeSteps []string `json:"beforeSteps,omitempty"`
}
// NeedsDebugOnFailure return true if the TaskRun is configured to debug on failure
func (trd *TaskRunDebug) NeedsDebugOnFailure() bool {
	return trd.Breakpoints != nil && trd.Breakpoints.OnFailure == EnabledOnFailureBreakpoint
}
// NeedsDebugBeforeStep return true if the step is configured to debug before execution
func (trd *TaskRunDebug) NeedsDebugBeforeStep(stepName string) bool {
	if trd.Breakpoints == nil {
		return false
	}
	return sets.NewString(trd.Breakpoints.BeforeSteps...).Has(stepName)
}
// StepNeedsDebug return true if the step is configured to debug
func (trd *TaskRunDebug) StepNeedsDebug(stepName string) bool {
	if trd.NeedsDebugOnFailure() {
		return true
	}
	return trd.NeedsDebugBeforeStep(stepName)
}
// HaveBeforeSteps return true if have any before steps
func (trd *TaskRunDebug) HaveBeforeSteps() bool {
	if trd.Breakpoints == nil {
		return false
	}
	return len(trd.Breakpoints.BeforeSteps) > 0
}
// NeedsDebug return true if defined onfailure or have any before, after steps
func (trd *TaskRunDebug) NeedsDebug() bool {
	if trd.NeedsDebugOnFailure() {
		return true
	}
	return trd.HaveBeforeSteps()
}
// taskRunCondSet is the batch condition set used by the Mark* helpers below
// to manage the "Succeeded" condition on TaskRunStatus.
var taskRunCondSet = apis.NewBatchConditionSet()
// TaskRunStatus defines the observed state of TaskRun
type TaskRunStatus struct {
	// Status holds the Knative duck-typed status (conditions etc.).
	duckv1.Status `json:",inline"`

	// TaskRunStatusFields inlines the status fields.
	TaskRunStatusFields `json:",inline"`
}
// TaskRunConditionType is an enum used to store TaskRun custom
// conditions such as one used in spire results verification
type TaskRunConditionType string

const (
	// TaskRunConditionResultsVerified is a Condition Type that indicates that the results were verified by spire
	TaskRunConditionResultsVerified TaskRunConditionType = "SignedResultsVerified"
)

// String returns the condition type as a plain string.
func (t TaskRunConditionType) String() string {
	return string(t)
}
// TaskRunReason is an enum used to store all TaskRun reason for
// the Succeeded condition that are controlled by the TaskRun itself. Failure
// reasons that emerge from underlying resources are not included here
type TaskRunReason string

const (
	// TaskRunReasonStarted is the reason set when the TaskRun has just started
	TaskRunReasonStarted TaskRunReason = "Started"
	// TaskRunReasonRunning is the reason set when the TaskRun is running
	TaskRunReasonRunning TaskRunReason = "Running"
	// TaskRunReasonSuccessful is the reason set when the TaskRun completed successfully
	TaskRunReasonSuccessful TaskRunReason = "Succeeded"
	// TaskRunReasonFailed is the reason set when the TaskRun completed with a failure
	TaskRunReasonFailed TaskRunReason = "Failed"
	// TaskRunReasonToBeRetried is the reason set when the last TaskRun execution failed, and will be retried
	TaskRunReasonToBeRetried TaskRunReason = "ToBeRetried"
	// TaskRunReasonCancelled is the reason set when the TaskRun is cancelled by the user
	TaskRunReasonCancelled TaskRunReason = "TaskRunCancelled"
	// TaskRunReasonTimedOut is the reason set when one TaskRun execution has timed out
	TaskRunReasonTimedOut TaskRunReason = "TaskRunTimeout"
	// TaskRunReasonResolvingTaskRef indicates that the TaskRun is waiting for
	// its taskRef to be asynchronously resolved.
	// NOTE(review): declared without the TaskRunReason type, unlike its
	// siblings — confirm whether intentional (adding the type would require
	// callers using it as a plain string to convert).
	TaskRunReasonResolvingTaskRef = "ResolvingTaskRef"
	// TaskRunReasonImagePullFailed is the reason set when the step of a task fails due to image not being pulled
	TaskRunReasonImagePullFailed TaskRunReason = "TaskRunImagePullFailed"
	// TaskRunReasonResultsVerified is the reason set when the TaskRun results are verified by spire
	TaskRunReasonResultsVerified TaskRunReason = "TaskRunResultsVerified"
	// TaskRunReasonsResultsVerificationFailed is the reason set when the TaskRun results are failed to verify by spire
	TaskRunReasonsResultsVerificationFailed TaskRunReason = "TaskRunResultsVerificationFailed"
	// AwaitingTaskRunResults is the reason set when waiting upon `TaskRun` results and signatures to verify
	AwaitingTaskRunResults TaskRunReason = "AwaitingTaskRunResults"
	// TaskRunReasonResultLargerThanAllowedLimit is the reason set when one of the results exceeds its maximum allowed limit of 1 KB
	TaskRunReasonResultLargerThanAllowedLimit TaskRunReason = "TaskRunResultLargerThanAllowedLimit"
	// TaskRunReasonStopSidecarFailed indicates that the sidecar is not properly stopped.
	// NOTE(review): also declared without the TaskRunReason type — see
	// TaskRunReasonResolvingTaskRef above.
	TaskRunReasonStopSidecarFailed = "TaskRunStopSidecarFailed"
)

// String returns the reason as a plain string.
func (t TaskRunReason) String() string {
	return string(t)
}
// GetStartedReason returns the reason set to the "Succeeded" condition when
// InitializeConditions is invoked
func (trs *TaskRunStatus) GetStartedReason() string {
	return string(TaskRunReasonStarted)
}
// GetRunningReason returns the reason placed on the "Succeeded" condition once
// the TaskRun begins running, i.e. the resource validated and started its work.
func (trs *TaskRunStatus) GetRunningReason() string {
	running := TaskRunReasonRunning
	return running.String()
}
// MarkResourceOngoing sets the "Succeeded" condition to Unknown with the
// supplied reason and message, indicating the TaskRun is still in progress.
func (trs *TaskRunStatus) MarkResourceOngoing(reason TaskRunReason, message string) {
	cond := apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionUnknown,
		Reason:  reason.String(),
		Message: message,
	}
	taskRunCondSet.Manage(trs).SetCondition(cond)
}
// MarkResourceFailed sets the "Succeeded" condition to False with the given
// reason and the error's message, then records the completion time from the
// condition's transition timestamp.
func (trs *TaskRunStatus) MarkResourceFailed(reason TaskRunReason, err error) {
	manager := taskRunCondSet.Manage(trs)
	manager.SetCondition(apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionFalse,
		Reason:  reason.String(),
		Message: err.Error(),
	})
	// The condition was just set above, so it is guaranteed to exist.
	succeeded := manager.GetCondition(apis.ConditionSucceeded)
	trs.CompletionTime = &succeeded.LastTransitionTime.Inner
}
// RetriesStatus records one TaskRunStatus per prior retry attempt.
// +listType=atomic
type RetriesStatus []TaskRunStatus
// TaskRunStatusFields holds the fields of TaskRun's status. This is defined
// separately and inlined so that other types can readily consume these fields
// via duck typing.
type TaskRunStatusFields struct {
	// PodName is the name of the pod responsible for executing this task's steps.
	PodName string `json:"podName"`
	// StartTime is the time the build is actually started.
	StartTime *metav1.Time `json:"startTime,omitempty"`
	// CompletionTime is the time the build completed.
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
	// Steps describes the state of each build step container.
	// +optional
	// +listType=atomic
	Steps []StepState `json:"steps,omitempty"`
	// CloudEvents describe the state of each cloud event requested via a
	// CloudEventResource.
	//
	// Deprecated: Removed in v0.44.0.
	//
	// +optional
	// +listType=atomic
	CloudEvents []CloudEventDelivery `json:"cloudEvents,omitempty"`
	// RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures.
	// All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant.
	// See TaskRun.status (API version: tekton.dev/v1beta1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	RetriesStatus RetriesStatus `json:"retriesStatus,omitempty"`
	// Results from Resources built during the TaskRun.
	// This is tomb-stoned along with the removal of pipelineResources
	// Deprecated: this field is not populated and is preserved only for backwards compatibility
	// +optional
	// +listType=atomic
	ResourcesResult []PipelineResourceResult `json:"resourcesResult,omitempty"`
	// TaskRunResults are the list of results written out by the task's containers
	// +optional
	// +listType=atomic
	TaskRunResults []TaskRunResult `json:"taskResults,omitempty"`
	// Sidecars has one entry per sidecar in the manifest. Each entry
	// represents the image ID of the corresponding sidecar.
	// +listType=atomic
	Sidecars []SidecarState `json:"sidecars,omitempty"`
	// TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.
	// See Task.spec (API version tekton.dev/v1beta1)
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	TaskSpec *TaskSpec `json:"taskSpec,omitempty"`
	// Provenance contains some key authenticated metadata about how a software artifact was built (what sources, what inputs/outputs, etc.).
	// +optional
	Provenance *Provenance `json:"provenance,omitempty"`
	// SpanContext contains tracing span context fields used to propagate trace information.
	SpanContext map[string]string `json:"spanContext,omitempty"`
}
// TaskRunStepOverride is used to override the values of a Step in the corresponding Task.
type TaskRunStepOverride struct {
	// The name of the Step to override; must match a Step name in the Task.
	Name string `json:"name"`
	// The resource requirements to apply to the Step.
	Resources corev1.ResourceRequirements `json:"resources"`
}
// TaskRunSidecarOverride is used to override the values of a Sidecar in the corresponding Task.
type TaskRunSidecarOverride struct {
	// The name of the Sidecar to override; must match a Sidecar name in the Task.
	Name string `json:"name"`
	// The resource requirements to apply to the Sidecar.
	Resources corev1.ResourceRequirements `json:"resources"`
}
// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*TaskRun) GetGroupVersionKind() schema.GroupVersionKind {
	kind := pipeline.TaskRunControllerName
	return SchemeGroupVersion.WithKind(kind)
}
// GetStatusCondition exposes the TaskRun status as a ConditionAccessor.
func (tr *TaskRun) GetStatusCondition() apis.ConditionAccessor {
	status := &tr.Status
	return status
}
// GetCondition returns the Condition matching the given type, or nil if absent.
func (trs *TaskRunStatus) GetCondition(t apis.ConditionType) *apis.Condition {
	manager := taskRunCondSet.Manage(trs)
	return manager.GetCondition(t)
}
// InitializeConditions sets all conditions in taskRunCondSet to Unknown for
// the TaskRun, records the start time when it is not yet set, and stamps the
// initial "Succeeded" condition with the Started reason on first invocation.
func (trs *TaskRunStatus) InitializeConditions() {
	justStarted := trs.StartTime.IsZero()
	if justStarted {
		trs.StartTime = &metav1.Time{Time: time.Now()}
	}
	manager := taskRunCondSet.Manage(trs)
	manager.InitializeConditions()
	if justStarted {
		// Tag the freshly-initialized "Succeeded" condition as Started.
		cond := manager.GetCondition(apis.ConditionSucceeded)
		cond.Reason = TaskRunReasonStarted.String()
		manager.SetCondition(*cond)
	}
}
// SetCondition sets the condition, unsetting previous conditions with the same
// type as necessary. A nil condition is ignored.
func (trs *TaskRunStatus) SetCondition(newCond *apis.Condition) {
	if newCond == nil {
		return
	}
	taskRunCondSet.Manage(trs).SetCondition(*newCond)
}
// StepState reports the results of running a step in a Task.
type StepState struct {
	// ContainerState mirrors the state of the step's container.
	corev1.ContainerState `json:",inline"`
	// Name is the step name as declared in the Task.
	Name string `json:"name,omitempty"`
	// ContainerName is the name of the container executing this step.
	ContainerName string `json:"container,omitempty"`
	// ImageID is the image ID of the container that ran the step.
	ImageID string `json:"imageID,omitempty"`
	// Results holds the step-level results written by this step.
	Results []TaskRunStepResult `json:"results,omitempty"`
	// Provenance carries authenticated build metadata for this step.
	Provenance *Provenance `json:"provenance,omitempty"`
	// Inputs are the step artifacts consumed by this step.
	Inputs []TaskRunStepArtifact `json:"inputs,omitempty"`
	// Outputs are the step artifacts produced by this step.
	Outputs []TaskRunStepArtifact `json:"outputs,omitempty"`
}
// SidecarState reports the results of running a sidecar in a Task.
type SidecarState struct {
	// ContainerState mirrors the state of the sidecar's container.
	corev1.ContainerState `json:",inline"`
	// Name is the sidecar name as declared in the Task.
	Name string `json:"name,omitempty"`
	// ContainerName is the name of the container running this sidecar.
	ContainerName string `json:"container,omitempty"`
	// ImageID is the image ID of the container that ran the sidecar.
	ImageID string `json:"imageID,omitempty"`
}
// CloudEventDelivery is the target of a cloud event along with the state of
// delivery.
type CloudEventDelivery struct {
	// Target points to an addressable
	Target string `json:"target,omitempty"`
	// Status tracks the delivery state for this target.
	Status CloudEventDeliveryState `json:"status,omitempty"`
}
// CloudEventCondition is a string that represents the condition of the event.
type CloudEventCondition string

const (
	// CloudEventConditionUnknown means that the condition for the event to be
	// triggered was not met yet, or we don't know the state yet.
	CloudEventConditionUnknown CloudEventCondition = "Unknown"
	// CloudEventConditionSent means that the event was sent successfully
	CloudEventConditionSent CloudEventCondition = "Sent"
	// CloudEventConditionFailed means that there was one or more attempts to
	// send the event, and none was successful so far.
	CloudEventConditionFailed CloudEventCondition = "Failed"
)
// CloudEventDeliveryState reports the state of a cloud event to be sent.
type CloudEventDeliveryState struct {
	// Current status
	Condition CloudEventCondition `json:"condition,omitempty"`
	// SentAt is the time at which the last attempt to send the event was made
	// +optional
	SentAt *metav1.Time `json:"sentAt,omitempty"`
	// Error is the text of error (if any); note it is serialized under the
	// json key "message".
	Error string `json:"message"`
	// RetryCount is the number of attempts of sending the cloud event
	RetryCount int32 `json:"retryCount"`
}
// +genclient
// +genreconciler:krshapedlogic=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// TaskRun represents a single execution of a Task. TaskRuns are how the steps
// specified in a Task are executed; they specify the parameters and resources
// used to run the steps in a Task.
//
// Deprecated: Please use v1.TaskRun instead.
type TaskRun struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the desired state of the TaskRun.
	// +optional
	Spec TaskRunSpec `json:"spec,omitempty"`
	// Status is the observed state of the TaskRun.
	// +optional
	Status TaskRunStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// TaskRunList contains a list of TaskRun
type TaskRunList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the collection of TaskRuns in this list.
	Items []TaskRun `json:"items"`
}
// GetPipelineRunPVCName returns the PVC name derived from the owning
// PipelineRun ("<pipelinerun>-pvc"), or "" when the TaskRun is nil or has
// no PipelineRun owner reference.
func (tr *TaskRun) GetPipelineRunPVCName() string {
	if tr == nil {
		return ""
	}
	for _, ref := range tr.GetOwnerReferences() {
		if ref.Kind != pipeline.PipelineRunControllerName {
			continue
		}
		return ref.Name + "-pvc"
	}
	return ""
}
// HasPipelineRunOwnerReference returns true if the TaskRun has an
// owner reference of kind PipelineRun.
func (tr *TaskRun) HasPipelineRunOwnerReference() bool {
	// Guard against a nil receiver for consistency with GetPipelineRunPVCName,
	// IsSuccessful and IsFailure, which all tolerate nil.
	if tr == nil {
		return false
	}
	for _, ref := range tr.GetOwnerReferences() {
		if ref.Kind == pipeline.PipelineRunControllerName {
			return true
		}
	}
	return false
}
// IsDone returns true if the TaskRun's "Succeeded" condition has resolved
// (i.e. it is no longer Unknown).
func (tr *TaskRun) IsDone() bool {
	succeeded := tr.Status.GetCondition(apis.ConditionSucceeded)
	return !succeeded.IsUnknown()
}
// HasStarted reports whether the TaskRun has a valid (non-nil, non-zero)
// start time recorded in its status.
func (tr *TaskRun) HasStarted() bool {
	start := tr.Status.StartTime
	return start != nil && !start.IsZero()
}
// IsSuccessful returns true if the TaskRun's "Succeeded" condition is True.
// A nil TaskRun is never successful.
func (tr *TaskRun) IsSuccessful() bool {
	if tr == nil {
		return false
	}
	return tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue()
}
// IsFailure returns true if the TaskRun's "Succeeded" condition is False.
// A nil TaskRun is never a failure.
func (tr *TaskRun) IsFailure() bool {
	if tr == nil {
		return false
	}
	return tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse()
}
// IsCancelled reports whether the user requested cancellation via spec.status.
func (tr *TaskRun) IsCancelled() bool {
	status := tr.Spec.Status
	return status == TaskRunSpecStatusCancelled
}
// IsTaskRunResultVerified returns true if the TaskRun's results have been
// validated by spire (the results-verified condition is True).
func (tr *TaskRun) IsTaskRunResultVerified() bool {
	condType := apis.ConditionType(TaskRunConditionResultsVerified.String())
	return tr.Status.GetCondition(condType).IsTrue()
}
// IsTaskRunResultDone returns true once result verification has resolved,
// i.e. the results-verified condition is no longer Unknown.
func (tr *TaskRun) IsTaskRunResultDone() bool {
	condType := apis.ConditionType(TaskRunConditionResultsVerified.String())
	return !tr.Status.GetCondition(condType).IsUnknown()
}
// IsRetriable returns true while the recorded retry attempts have not yet
// exhausted spec.retries.
func (tr *TaskRun) IsRetriable() bool {
	attemptsSoFar := len(tr.Status.RetriesStatus)
	return attemptsSoFar < tr.Spec.Retries
}
// HasTimedOut returns true if the TaskRun has been running longer than its
// allowed timeout. A TaskRun that never started, or whose timeout is the
// "no timeout" sentinel, can never time out.
func (tr *TaskRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool {
	if tr.Status.StartTime.IsZero() {
		return false
	}
	timeout := tr.GetTimeout(ctx)
	if timeout == apisconfig.NoTimeoutDuration {
		// A zero (defaulted or explicit) timeout means "never time out".
		return false
	}
	elapsed := c.Since(tr.Status.StartTime.Time)
	return elapsed > timeout
}
// GetTimeout returns the TaskRun's configured timeout, falling back to the
// platform default (in minutes) when spec.timeout is unset.
func (tr *TaskRun) GetTimeout(ctx context.Context) time.Duration {
	if timeout := tr.Spec.Timeout; timeout != nil {
		return timeout.Duration
	}
	minutes := config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes
	return time.Duration(minutes) * time.Minute //nolint:durationcheck
}
// GetNamespacedName returns the namespace/name pair identifying this TaskRun.
func (tr *TaskRun) GetNamespacedName() types.NamespacedName {
	return types.NamespacedName{
		Namespace: tr.Namespace,
		Name:      tr.Name,
	}
}
// HasVolumeClaimTemplate returns true if any declared workspace carries a
// volumeClaimTemplate, used to create per-run PersistentVolumeClaims with an
// OwnerReference.
func (tr *TaskRun) HasVolumeClaimTemplate() bool {
	for i := range tr.Spec.Workspaces {
		if tr.Spec.Workspaces[i].VolumeClaimTemplate != nil {
			return true
		}
	}
	return false
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
"github.com/tektoncd/pipeline/pkg/apis/validate"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
// Compile-time assertions that *TaskRun satisfies the webhook interfaces.
var (
	_ apis.Validatable              = (*TaskRun)(nil)
	_ resourcesemantics.VerbLimited = (*TaskRun)(nil)
)
// SupportedVerbs returns the admission operations for which validation runs:
// create and update only.
func (tr *TaskRun) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{
		admissionregistrationv1.Create,
		admissionregistrationv1.Update,
	}
}
// Validate implements apis.Validatable for TaskRun, checking the object
// metadata and then the spec.
func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError {
	metaErrs := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata")
	specErrs := tr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")
	return metaErrs.Also(specErrs)
}
// Validate checks a TaskRunSpec: exactly one of taskRef/taskSpec, then
// params, workspaces, debug, overrides, compute resources, status fields,
// pod template env, timeout, and the (disallowed) resources field.
func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	// Validate the spec changes
	errs = errs.Also(ts.ValidateUpdate(ctx))
	// Must have exactly one of taskRef and taskSpec.
	if ts.TaskRef == nil && ts.TaskSpec == nil {
		errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec"))
	}
	if ts.TaskRef != nil && ts.TaskSpec != nil {
		errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec"))
	}
	// Validate TaskRef if it's present.
	if ts.TaskRef != nil {
		errs = errs.Also(ts.TaskRef.Validate(ctx).ViaField("taskRef"))
	}
	// Validate TaskSpec if it's present.
	if ts.TaskSpec != nil {
		// Inline specs may be disabled per resource kind via the
		// disable-inline-spec feature flag (comma-separated list).
		if slices.Contains(strings.Split(
			config.FromContextOrDefaults(ctx).FeatureFlags.DisableInlineSpec, ","), "taskrun") {
			errs = errs.Also(apis.ErrDisallowedFields("taskSpec"))
		}
		errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec"))
	}
	errs = errs.Also(ValidateParameters(ctx, ts.Params).ViaField("params"))
	// Validate propagated parameters
	errs = errs.Also(ts.validateInlineParameters(ctx))
	errs = errs.Also(ValidateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces"))
	if ts.Debug != nil {
		// Debug is alpha-gated.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug"))
		errs = errs.Also(validateDebug(ts.Debug).ViaField("debug"))
	}
	if ts.StepOverrides != nil {
		// Step overrides are beta-gated.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepOverrides", config.BetaAPIFields).ViaField("stepOverrides"))
		errs = errs.Also(validateStepOverrides(ts.StepOverrides).ViaField("stepOverrides"))
	}
	if ts.SidecarOverrides != nil {
		// Sidecar overrides are beta-gated.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.BetaAPIFields).ViaField("sidecarOverrides"))
		errs = errs.Also(validateSidecarOverrides(ts.SidecarOverrides).ViaField("sidecarOverrides"))
	}
	if ts.ComputeResources != nil {
		// Task-level compute resources are beta-gated and mutually exclusive
		// with per-step resource overrides.
		errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "computeResources", config.BetaAPIFields).ViaField("computeResources"))
		errs = errs.Also(validateTaskRunComputeResources(ts.ComputeResources, ts.StepOverrides))
	}
	// The only user-settable spec.status value is the cancellation sentinel.
	if ts.Status != "" {
		if ts.Status != TaskRunSpecStatusCancelled {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be %s", ts.Status, TaskRunSpecStatusCancelled), "status"))
		}
	}
	if ts.Status == "" {
		// statusMessage only makes sense together with a status.
		if ts.StatusMessage != "" {
			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("statusMessage should not be set if status is not set, but it is currently set to %s", ts.StatusMessage), "statusMessage"))
		}
	}
	if ts.PodTemplate != nil {
		errs = errs.Also(validatePodTemplateEnv(ctx, *ts.PodTemplate))
	}
	if ts.Timeout != nil && ts.Timeout.Duration < 0 {
		errs = errs.Also(apis.ErrInvalidValue(ts.Timeout.Duration.String()+" should be >= 0", "timeout"))
	}
	// The deprecated PipelineResources field may no longer be set.
	if ts.Resources != nil {
		errs = errs.Also(apis.ErrDisallowedFields("resources"))
	}
	return errs
}
// ValidateUpdate validates the update of a TaskRunSpec against the baseline
// object in the admission context: managedBy is immutable; once done, no spec
// changes are allowed (modulo default normalization); once started, only
// status/statusMessage may change.
func (ts *TaskRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
	// Only applies inside an update admission request.
	if !apis.IsInUpdate(ctx) {
		return
	}
	oldObj, ok := apis.GetBaseline(ctx).(*TaskRun)
	if !ok || oldObj == nil {
		return
	}
	// managedBy may neither appear, disappear, nor change value.
	if (oldObj.Spec.ManagedBy == nil) != (ts.ManagedBy == nil) || (oldObj.Spec.ManagedBy != nil && *oldObj.Spec.ManagedBy != *ts.ManagedBy) {
		errs = errs.Also(apis.ErrInvalidValue("managedBy is immutable", "spec.managedBy"))
	}
	if oldObj.IsDone() {
		// try comparing without any copying first
		// this handles the common case where only finalizers changed
		if equality.Semantic.DeepEqual(&oldObj.Spec, ts) {
			return nil // Specs identical, allow update
		}
		// Specs differ, this could be due to different defaults after upgrade
		// Apply current defaults to old spec to normalize
		oldCopy := oldObj.Spec.DeepCopy()
		oldCopy.SetDefaults(ctx)
		if equality.Semantic.DeepEqual(oldCopy, ts) {
			return nil // Difference was only defaults, allow update
		}
		// Real spec changes detected, reject update
		errs = errs.Also(apis.ErrInvalidValue("Once the TaskRun is complete, no updates are allowed", ""))
		return errs
	}
	// Handle started but not done case: copy the old spec, overwrite the
	// mutable fields with the new values, and require everything else equal.
	old := oldObj.Spec.DeepCopy()
	old.Status = ts.Status
	old.StatusMessage = ts.StatusMessage
	old.ManagedBy = ts.ManagedBy // Already tested before
	if !equality.Semantic.DeepEqual(old, ts) {
		errs = errs.Also(apis.ErrInvalidValue("Once the TaskRun has started, only status and statusMessage updates are allowed", ""))
	}
	return
}
// validateInlineParameters validates that any parameters called in the
// Task spec are declared in the TaskRun.
// This is crucial for propagated parameters because the parameters could
// be defined under taskRun and then called directly in the task steps.
// In this case, parameters cannot be validated by the underlying taskSpec
// since they may not have the parameters declared because of propagation.
func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis.FieldError) {
	if ts.TaskSpec == nil {
		return errs
	}
	// Build a synthetic ParamSpec set from the TaskRun's provided params...
	paramSpecForValidation := make(map[string]ParamSpec)
	for _, p := range ts.Params {
		paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation)
	}
	// ...then merge in the declarations from the inline TaskSpec.
	for _, p := range ts.TaskSpec.Params {
		var err *apis.FieldError
		paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation)
		if err != nil {
			errs = errs.Also(err)
		}
	}
	var paramSpec []ParamSpec
	for _, v := range paramSpecForValidation {
		paramSpec = append(paramSpec, v)
	}
	// ts.TaskSpec is guaranteed non-nil by the early return above, so only the
	// Steps presence needs checking (the original re-tested TaskSpec != nil).
	if ts.TaskSpec.Steps != nil {
		errs = errs.Also(ValidateParameterTypes(ctx, paramSpec))
		errs = errs.Also(ValidateParameterVariables(ctx, ts.TaskSpec.Steps, paramSpec))
		errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, ts.TaskSpec.Steps, paramSpec))
	}
	return errs
}
// validatePodTemplateEnv rejects pod-template environment variables whose
// names appear in the configured default-forbidden-env list.
func validatePodTemplateEnv(ctx context.Context, podTemplate pod.Template) (errs *apis.FieldError) {
	forbidden := config.FromContextOrDefaults(ctx).Defaults.DefaultForbiddenEnv
	if len(forbidden) == 0 {
		return errs
	}
	for _, env := range podTemplate.Env {
		if !slices.Contains(forbidden, env.Name) {
			continue
		}
		errs = errs.Also(apis.ErrInvalidValue("PodTemplate cannot update a forbidden env: "+env.Name, "PodTemplate.Env"))
	}
	return errs
}
// createParamSpecFromParam synthesizes a ParamSpec from a provided Param and
// records it in paramSpecForValidation under the param's name. For object
// values, a string-typed property is inferred for each provided key.
func createParamSpecFromParam(p Param, paramSpecForValidation map[string]ParamSpec) map[string]ParamSpec {
	value := p.Value
	pSpec := ParamSpec{
		Name:    p.Name,
		Default: &value,
		Type:    p.Value.Type,
	}
	if p.Value.ObjectVal != nil {
		// Allocate the properties map once (the original made an initial map
		// that was immediately overwritten — a dead allocation).
		props := make(map[string]PropertySpec, len(p.Value.ObjectVal))
		for k := range p.Value.ObjectVal {
			props[k] = PropertySpec{Type: ParamTypeString}
		}
		pSpec.Properties = props
	}
	paramSpecForValidation[p.Name] = pSpec
	return paramSpecForValidation
}
// combineParamSpec merges a declared ParamSpec into the map of synthetic
// specs built from the TaskRun's provided params. When a spec with the same
// name exists, object defaults are merged key-by-key and Properties must be
// fully declared; otherwise the declaration is taken as-is.
func combineParamSpec(p ParamSpec, paramSpecForValidation map[string]ParamSpec) (map[string]ParamSpec, *apis.FieldError) {
	if pSpec, ok := paramSpecForValidation[p.Name]; ok {
		// Merge defaults with provided values in the taskrun.
		if p.Default != nil && p.Default.ObjectVal != nil {
			for k, v := range p.Default.ObjectVal {
				if pSpec.Default.ObjectVal == nil {
					pSpec.Default.ObjectVal = map[string]string{k: v}
				} else {
					pSpec.Default.ObjectVal[k] = v
				}
			}
			// If Default values of object type are provided then Properties must also be fully declared.
			if p.Properties == nil {
				return paramSpecForValidation, apis.ErrMissingField(p.Name + ".properties")
			}
		}
		// Properties must be defined if paramSpec is of object Type
		if pSpec.Type == ParamTypeObject {
			if p.Properties == nil {
				return paramSpecForValidation, apis.ErrMissingField(p.Name + ".properties")
			}
			// Expect Properties to be complete
			pSpec.Properties = p.Properties
		}
		paramSpecForValidation[p.Name] = pSpec
	} else {
		// No values provided by task run but found a paramSpec declaration.
		// Expect it to be fully speced out.
		paramSpecForValidation[p.Name] = p
	}
	return paramSpecForValidation, nil
}
// validateDebug checks the debug section of a TaskRun: the onFailure
// breakpoint, when the section is present, may only be "enabled", and each
// before-step breakpoint may be listed at most once.
func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) {
	if db == nil || db.Breakpoints == nil {
		return errs
	}
	switch db.Breakpoints.OnFailure {
	case "":
		errs = errs.Also(apis.ErrInvalidValue("onFailure breakpoint is empty, it is only allowed to be set as enabled", "breakpoints.onFailure"))
	case EnabledOnFailureBreakpoint:
		// The only accepted value.
	default:
		errs = errs.Also(apis.ErrInvalidValue(db.Breakpoints.OnFailure+" is not a valid onFailure breakpoint value, onFailure breakpoint is only allowed to be set as enabled", "breakpoints.onFailure"))
	}
	seen := sets.NewString()
	for i, step := range db.Breakpoints.BeforeSteps {
		if seen.Has(step) {
			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("before step must be unique, the same step: %s is defined multiple times at", step), fmt.Sprintf("breakpoints.beforeSteps[%d]", i)))
		}
		seen.Insert(step)
	}
	return errs
}
// ValidateWorkspaceBindings makes sure the volumes provided for the Task's
// declared workspaces make sense, and that workspace names are unique.
func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) {
	names := make([]string, 0, len(wb))
	for idx := range wb {
		names = append(names, wb[idx].Name)
		errs = errs.Also(wb[idx].Validate(ctx).ViaIndex(idx))
	}
	return errs.Also(validateNoDuplicateNames(names, true))
}
// ValidateParameters makes sure the params for the Task have unique names.
func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) {
	names := make([]string, 0, len(params))
	for _, p := range params {
		names = append(names, p.Name)
	}
	return errs.Also(validateNoDuplicateNames(names, false))
}
// validateStepOverrides requires every step override to be named, and the
// names to be unique.
func validateStepOverrides(overrides []TaskRunStepOverride) (errs *apis.FieldError) {
	var names []string
	for i, o := range overrides {
		if o.Name == "" {
			errs = errs.Also(apis.ErrMissingField("name").ViaIndex(i))
			continue
		}
		names = append(names, o.Name)
	}
	return errs.Also(validateNoDuplicateNames(names, true))
}
// validateTaskRunComputeResources ensures that compute resources are not
// configured at both the step level (stepOverrides) and the task level.
func validateTaskRunComputeResources(computeResources *corev1.ResourceRequirements, overrides []TaskRunStepOverride) (errs *apis.FieldError) {
	// Hoisted loop-invariant: with no task-level resources nothing can conflict
	// (the original re-tested computeResources != nil on every iteration).
	if computeResources == nil {
		return nil
	}
	for _, override := range overrides {
		if override.Resources.Size() != 0 {
			return apis.ErrMultipleOneOf(
				"stepOverrides.resources",
				"computeResources",
			)
		}
	}
	return nil
}
// validateSidecarOverrides requires every sidecar override to be named, and
// the names to be unique.
func validateSidecarOverrides(overrides []TaskRunSidecarOverride) (errs *apis.FieldError) {
	var names []string
	for i, o := range overrides {
		if o.Name == "" {
			errs = errs.Also(apis.ErrMissingField("name").ViaIndex(i))
			continue
		}
		names = append(names, o.Name)
	}
	return errs.Also(validateNoDuplicateNames(names, true))
}
// validateNoDuplicateNames returns an error for each name that is repeated in names.
// Case insensitive.
// If byIndex is true, the error will be reported by index instead of by key.
func validateNoDuplicateNames(names []string, byIndex bool) (errs *apis.FieldError) {
	seen := sets.NewString()
	for i, n := range names {
		// Lowercase once per name (the original called strings.ToLower twice
		// per iteration, for Has and Insert).
		lower := strings.ToLower(n)
		if seen.Has(lower) {
			if byIndex {
				errs = errs.Also(apis.ErrMultipleOneOf("name").ViaIndex(i))
			} else {
				errs = errs.Also(apis.ErrMultipleOneOf("name").ViaKey(n))
			}
		}
		seen.Insert(lower)
	}
	return errs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"github.com/tektoncd/pipeline/pkg/substitution"
"k8s.io/apimachinery/pkg/selection"
)
// WhenExpression allows a PipelineTask to declare expressions to be evaluated before the Task is run
// to determine whether the Task should be executed or skipped
type WhenExpression struct {
	// Input is the string for guard checking which can be a static input or an output from a parent Task
	Input string `json:"input,omitempty"`
	// Operator that represents an Input's relationship to the values
	Operator selection.Operator `json:"operator,omitempty"`
	// Values is an array of strings, which is compared against the input, for guard checking
	// It must be non-empty
	// +listType=atomic
	Values []string `json:"values,omitempty"`
	// CEL is a string of Common Language Expression, which can be used to conditionally execute
	// the task based on the result of the expression evaluation
	// More info about CEL syntax: https://github.com/google/cel-spec/blob/master/doc/langdef.md
	// CEL is mutually exclusive with Input/Operator/Values (enforced at validation).
	// +optional
	CEL string `json:"cel,omitempty"`
}
// isInputInValues reports whether Input equals any entry of Values.
func (we *WhenExpression) isInputInValues() bool {
	for _, candidate := range we.Values {
		if candidate == we.Input {
			return true
		}
	}
	return false
}
// isTrue evaluates the expression: membership for selection.In, and the
// negation for any other operator (i.e. selection.NotIn).
func (we *WhenExpression) isTrue() bool {
	matched := we.isInputInValues()
	if we.Operator == selection.In {
		return matched
	}
	// selection.NotIn
	return !matched
}
// applyReplacements returns a copy of the WhenExpression with variable
// replacements applied to Input, CEL and Values. Values referencing array
// params/results expand to multiple entries via arrayReplacements.
func (we *WhenExpression) applyReplacements(replacements map[string]string, arrayReplacements map[string][]string) WhenExpression {
	replacedInput := substitution.ApplyReplacements(we.Input, replacements)
	replacedCEL := substitution.ApplyReplacements(we.CEL, replacements)
	var replacedValues []string
	for _, val := range we.Values {
		// arrayReplacements holds a list of array parameters with a pattern - params.arrayParam1
		// array params are referenced using $(params.arrayParam1[*])
		// array results are referenced using $(results.resultname[*])
		// check if the param exist in the arrayReplacements to replace it with a list of values
		if _, ok := arrayReplacements[fmt.Sprintf("%s.%s", ParamsPrefix, ArrayReference(val))]; ok {
			replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...)
		} else if _, ok := arrayReplacements[ResultsArrayReference(val)]; ok {
			replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...)
		} else {
			// Plain string value: single scalar replacement.
			replacedValues = append(replacedValues, substitution.ApplyReplacements(val, replacements))
		}
	}
	return WhenExpression{Input: replacedInput, Operator: we.Operator, Values: replacedValues, CEL: replacedCEL}
}
// GetVarSubstitutionExpressions extracts all the values between "$(" and ")"
// in a When Expression's Input, CEL and Values, and reports whether any were
// found.
func (we *WhenExpression) GetVarSubstitutionExpressions() ([]string, bool) {
	var expressions []string
	expressions = append(expressions, validateString(we.Input)...)
	expressions = append(expressions, validateString(we.CEL)...)
	for _, v := range we.Values {
		expressions = append(expressions, validateString(v)...)
	}
	return expressions, len(expressions) != 0
}
// WhenExpressions are used to specify whether a Task should be executed or skipped
// All of them need to evaluate to True for a guarded Task to be executed.
type WhenExpressions []WhenExpression

// StepWhenExpressions is an alias applying the same guard semantics to Steps.
type StepWhenExpressions = WhenExpressions
// AllowsExecution evaluates an Input's relationship to an array of Values,
// based on the Operator, to determine whether all the When Expressions are
// True; only then is the guarded Task executed. For expressions with a CEL
// string, the pre-evaluated result is looked up in evaluatedCEL.
func (wes WhenExpressions) AllowsExecution(evaluatedCEL map[string]bool) bool {
	for i := range wes {
		we := wes[i]
		if we.CEL != "" && !evaluatedCEL[we.CEL] {
			return false
		}
		if !we.isTrue() {
			return false
		}
	}
	return true
}
// ReplaceVariables interpolates variables, such as Parameters and Results, in
// the Input and Values.
// Note: `replaced := wes` aliases the receiver's backing array, so the
// element assignments below also update the receiver's elements in place.
func (wes WhenExpressions) ReplaceVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions {
	replaced := wes
	for i := range wes {
		replaced[i] = wes[i].applyReplacements(replacements, arrayReplacements)
	}
	return replaced
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"strings"
"github.com/google/cel-go/cel"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
)
// validWhenOperators lists the only operators accepted in a WhenExpression.
var validWhenOperators = []string{
	string(selection.In),
	string(selection.NotIn),
}
// validate checks every when expression's fields, attaching errors under "when".
func (wes WhenExpressions) validate(ctx context.Context) *apis.FieldError {
	return wes.validateWhenExpressionsFields(ctx).ViaField("when")
}
// validateWhenExpressionsFields validates each expression's fields in turn,
// indexing errors by position.
func (wes WhenExpressions) validateWhenExpressionsFields(ctx context.Context) (errs *apis.FieldError) {
	for idx := range wes {
		errs = errs.Also(wes[idx].validateWhenExpressionFields(ctx).ViaIndex(idx))
	}
	return errs
}
// validateWhenExpressionFields validates one WhenExpression: either a CEL
// expression (feature-gated, exclusive of input/operator/values, and
// compilable), or a non-empty input/operator/values triple with a recognized
// operator.
func (we *WhenExpression) validateWhenExpressionFields(ctx context.Context) *apis.FieldError {
	// Check for nil/empty before touching any field: the original dereferenced
	// we.CEL first, making its later nil check unreachable.
	if we == nil || equality.Semantic.DeepEqual(we, &WhenExpression{}) {
		return apis.ErrMissingField(apis.CurrentField)
	}
	if we.CEL != "" {
		if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableCELInWhenExpression {
			return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use CEL: %s in WhenExpression", config.EnableCELInWhenExpression, we.CEL), "")
		}
		if we.Input != "" || we.Operator != "" || len(we.Values) != 0 {
			return apis.ErrGeneric(fmt.Sprintf("cel and input+operator+values cannot be set in one WhenExpression: %v", we))
		}
		// We need to compile the CEL expression and check if it is a valid expression
		// note that at the validation webhook, Tekton's variables are not substituted,
		// so they need to be wrapped with single quotes.
		// e.g. This is a valid CEL expression: '$(params.foo)' == 'foo';
		// But this is not a valid expression since CEL cannot recognize: $(params.foo) == 'foo';
		// This is not valid since we don't pass params to CEL's environment: params.foo == 'foo';
		env, _ := cel.NewEnv()
		_, iss := env.Compile(we.CEL)
		if iss.Err() != nil {
			// Bug fix: apis.ErrGeneric takes (diagnostic, paths...) — it does not
			// apply printf verbs, so the original emitted the raw format string
			// and treated the CEL text and error as field paths. Format explicitly.
			return apis.ErrGeneric(fmt.Sprintf("invalid cel expression: %s with err: %s", we.CEL, iss.Err().Error()), "")
		}
		return nil
	}
	if !sets.NewString(validWhenOperators...).Has(string(we.Operator)) {
		message := fmt.Sprintf("operator %q is not recognized. valid operators: %s", we.Operator, strings.Join(validWhenOperators, ","))
		return apis.ErrInvalidValue(message, apis.CurrentField)
	}
	if len(we.Values) == 0 {
		return apis.ErrInvalidValue("expecting non-empty values field", apis.CurrentField)
	}
	return nil
}
// validatePipelineParametersVariables checks that every parameter variable
// referenced by the when expressions' inputs and values resolves to a
// declared pipeline parameter of the appropriate type.
func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) {
	for i, expr := range wes {
		errs = errs.Also(validateStringVariable(expr.Input, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("input").ViaFieldIndex("when", i))
		for _, v := range expr.Values {
			// A value may reference a whole array param, e.g. $(params.foo[*]).
			// When the extracted variable name matches a declared array param,
			// validate it as an array variable; otherwise as a string variable.
			if arrayParamNames.Has(ArrayReference(v)) {
				errs = errs.Also(validateArrayVariable(v, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", i))
				continue
			}
			errs = errs.Also(validateStringVariable(v, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", i))
		}
	}
	return errs
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)
// convertTo copies every field of this v1beta1 WorkspaceDeclaration into the
// equivalent v1 WorkspaceDeclaration.
func (w WorkspaceDeclaration) convertTo(ctx context.Context, sink *v1.WorkspaceDeclaration) {
	sink.Optional = w.Optional
	sink.ReadOnly = w.ReadOnly
	sink.MountPath = w.MountPath
	sink.Description = w.Description
	sink.Name = w.Name
}
// convertFrom populates this v1beta1 WorkspaceDeclaration from the given v1
// WorkspaceDeclaration.
func (w *WorkspaceDeclaration) convertFrom(ctx context.Context, source v1.WorkspaceDeclaration) {
	w.Optional = source.Optional
	w.ReadOnly = source.ReadOnly
	w.MountPath = source.MountPath
	w.Description = source.Description
	w.Name = source.Name
}
// convertTo copies this v1beta1 WorkspaceUsage into the equivalent v1
// WorkspaceUsage.
func (w WorkspaceUsage) convertTo(ctx context.Context, sink *v1.WorkspaceUsage) {
	sink.MountPath = w.MountPath
	sink.Name = w.Name
}
// convertFrom populates this v1beta1 WorkspaceUsage from the given v1
// WorkspaceUsage.
func (w *WorkspaceUsage) convertFrom(ctx context.Context, source v1.WorkspaceUsage) {
	w.MountPath = source.MountPath
	w.Name = source.Name
}
// convertTo copies this v1beta1 PipelineWorkspaceDeclaration into the
// equivalent v1 PipelineWorkspaceDeclaration.
func (w PipelineWorkspaceDeclaration) convertTo(ctx context.Context, sink *v1.PipelineWorkspaceDeclaration) {
	sink.Optional = w.Optional
	sink.Description = w.Description
	sink.Name = w.Name
}
// convertFrom populates this v1beta1 PipelineWorkspaceDeclaration from the
// given v1 PipelineWorkspaceDeclaration.
func (w *PipelineWorkspaceDeclaration) convertFrom(ctx context.Context, source v1.PipelineWorkspaceDeclaration) {
	w.Optional = source.Optional
	w.Description = source.Description
	w.Name = source.Name
}
// convertTo copies this v1beta1 WorkspacePipelineTaskBinding into the
// equivalent v1 WorkspacePipelineTaskBinding.
func (w WorkspacePipelineTaskBinding) convertTo(ctx context.Context, sink *v1.WorkspacePipelineTaskBinding) {
	sink.SubPath = w.SubPath
	sink.Workspace = w.Workspace
	sink.Name = w.Name
}
// convertFrom populates this v1beta1 WorkspacePipelineTaskBinding from the
// given v1 WorkspacePipelineTaskBinding.
func (w *WorkspacePipelineTaskBinding) convertFrom(ctx context.Context, source v1.WorkspacePipelineTaskBinding) {
	w.SubPath = source.SubPath
	w.Workspace = source.Workspace
	w.Name = source.Name
}
// convertTo copies this v1beta1 WorkspaceBinding into the equivalent v1
// WorkspaceBinding. Volume source pointers are copied as-is (shallow copy).
func (w WorkspaceBinding) convertTo(ctx context.Context, sink *v1.WorkspaceBinding) {
	sink.Name = w.Name
	sink.SubPath = w.SubPath
	sink.CSI = w.CSI
	sink.Projected = w.Projected
	sink.Secret = w.Secret
	sink.ConfigMap = w.ConfigMap
	sink.EmptyDir = w.EmptyDir
	sink.PersistentVolumeClaim = w.PersistentVolumeClaim
	sink.VolumeClaimTemplate = w.VolumeClaimTemplate
}
// ConvertFrom converts a v1 WorkspaceBinding into this v1beta1
// WorkspaceBinding. (The previous comment incorrectly referred to Param.)
// Volume source pointers are copied as-is (shallow copy).
func (w *WorkspaceBinding) ConvertFrom(ctx context.Context, source v1.WorkspaceBinding) {
	w.Name = source.Name
	w.SubPath = source.SubPath
	w.VolumeClaimTemplate = source.VolumeClaimTemplate
	w.PersistentVolumeClaim = source.PersistentVolumeClaim
	w.EmptyDir = source.EmptyDir
	w.ConfigMap = source.ConfigMap
	w.Secret = source.Secret
	w.Projected = source.Projected
	w.CSI = source.CSI
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"path/filepath"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
corev1 "k8s.io/api/core/v1"
)
// WorkspaceDeclaration is a declaration of a volume that a Task requires.
type WorkspaceDeclaration struct {
	// Name is the name by which you can bind the volume at runtime.
	Name string `json:"name"`
	// Description is an optional human readable description of this volume.
	// +optional
	Description string `json:"description,omitempty"`
	// MountPath overrides the directory that the volume will be made available at.
	// When empty, GetMountPath falls back to the default workspace directory.
	// +optional
	MountPath string `json:"mountPath,omitempty"`
	// ReadOnly dictates whether a mounted volume is writable. By default this
	// field is false and so mounted volumes are writable.
	ReadOnly bool `json:"readOnly,omitempty"`
	// Optional marks a Workspace as not being required in TaskRuns. By default
	// this field is false and so declared workspaces are required.
	Optional bool `json:"optional,omitempty"`
}
// GetMountPath returns the declared MountPath when one was provided,
// otherwise the default location: pipeline.WorkspaceDir joined with the
// workspace name.
func (w *WorkspaceDeclaration) GetMountPath() string {
	if mp := w.MountPath; mp != "" {
		return mp
	}
	return filepath.Join(pipeline.WorkspaceDir, w.Name)
}
// WorkspaceBinding maps a Task's declared workspace to a Volume.
// Exactly one of the volume source fields is expected to be set; see
// Validate in this package for the enforcement of that contract.
type WorkspaceBinding struct {
	// Name is the name of the workspace populated by the volume.
	Name string `json:"name"`
	// SubPath is optionally a directory on the volume which should be used
	// for this binding (i.e. the volume will be mounted at this sub directory).
	// +optional
	SubPath string `json:"subPath,omitempty"`
	// VolumeClaimTemplate is a template for a claim that will be created in the same namespace.
	// The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.
	// See PersistentVolumeClaim (API version: v1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
	// PersistentVolumeClaimVolumeSource represents a reference to a
	// PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.
	// +optional
	PersistentVolumeClaim *corev1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"`
	// EmptyDir represents a temporary directory that shares a Task's lifetime.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// Either this OR PersistentVolumeClaim can be used.
	// +optional
	EmptyDir *corev1.EmptyDirVolumeSource `json:"emptyDir,omitempty"`
	// ConfigMap represents a configMap that should populate this workspace.
	// +optional
	ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"`
	// Secret represents a secret that should populate this workspace.
	// +optional
	Secret *corev1.SecretVolumeSource `json:"secret,omitempty"`
	// Projected represents a projected volume that should populate this workspace.
	// +optional
	Projected *corev1.ProjectedVolumeSource `json:"projected,omitempty"`
	// CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
	// +optional
	CSI *corev1.CSIVolumeSource `json:"csi,omitempty"`
}
// WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun
// is expected to populate with a workspace binding.
//
// Deprecated: use PipelineWorkspaceDeclaration type instead
type WorkspacePipelineDeclaration = PipelineWorkspaceDeclaration

// PipelineWorkspaceDeclaration creates a named slot in a Pipeline that a PipelineRun
// is expected to populate with a workspace binding.
type PipelineWorkspaceDeclaration struct {
	// Name is the name of a workspace to be provided by a PipelineRun.
	Name string `json:"name"`
	// Description is a human readable string describing how the workspace will be
	// used in the Pipeline. It can be useful to include a bit of detail about which
	// tasks are intended to have access to the data on the workspace.
	// +optional
	Description string `json:"description,omitempty"`
	// Optional marks a Workspace as not being required in PipelineRuns. By default
	// this field is false and so declared workspaces are required.
	Optional bool `json:"optional,omitempty"`
}
// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be
// mapped to a task's declared workspace.
type WorkspacePipelineTaskBinding struct {
	// Name is the name of the workspace as declared by the task
	Name string `json:"name"`
	// Workspace is the name of the workspace declared by the pipeline
	// +optional
	Workspace string `json:"workspace,omitempty"`
	// SubPath is optionally a directory on the volume which should be used
	// for this binding (i.e. the volume will be mounted at this sub directory).
	// +optional
	SubPath string `json:"subPath,omitempty"`
}
// WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access
// to a Workspace defined in a Task.
type WorkspaceUsage struct {
	// Name is the name of the workspace this Step or Sidecar wants access to.
	Name string `json:"name"`
	// MountPath is the path that the workspace should be mounted to inside the Step or Sidecar,
	// overriding any MountPath specified in the Task's WorkspaceDeclaration.
	MountPath string `json:"mountPath"`
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/pkg/apis"
)
// allVolumeSourceFields is a list of all the volume source field paths that a
// WorkspaceBinding may include; it is used to construct the one-of error
// messages in Validate.
//
// Bug fix: "projected" and "csi" were missing even though Validate and
// numSources support both sources, so ErrMultipleOneOf/ErrMissingOneOf
// previously reported an incomplete field list.
var allVolumeSourceFields = []string{
	"persistentvolumeclaim",
	"volumeclaimtemplate",
	"emptydir",
	"configmap",
	"secret",
	"projected",
	"csi",
}
// Validate looks at the Volume provided in b and makes sure that it is valid:
// exactly one volume source must be specified, and the chosen source must
// itself carry the minimum information required to be usable.
func (b *WorkspaceBinding) Validate(ctx context.Context) (errs *apis.FieldError) {
	if b == nil || equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) {
		return apis.ErrMissingField(apis.CurrentField)
	}
	// Exactly one volume source must be configured.
	switch n := b.numSources(); {
	case n > 1:
		return apis.ErrMultipleOneOf(allVolumeSourceFields...)
	case n == 0:
		return apis.ErrMissingOneOf(allVolumeSourceFields...)
	}
	// Per-source checks: each supported source needs its key identifying field
	// populated. At most one case can fire since only one source is set.
	switch {
	case b.PersistentVolumeClaim != nil && b.PersistentVolumeClaim.ClaimName == "":
		// A PVC binding must name the claim to mount.
		return apis.ErrMissingField("persistentvolumeclaim.claimname")
	case b.ConfigMap != nil && b.ConfigMap.LocalObjectReference.Name == "":
		// A ConfigMap binding must name the ConfigMap.
		return apis.ErrMissingField("configmap.name")
	case b.Secret != nil && b.Secret.SecretName == "":
		// A Secret binding must name the Secret.
		return apis.ErrMissingField("secret.secretName")
	case b.Projected != nil && len(b.Projected.Sources) == 0:
		// A projected volume needs at least one source.
		return apis.ErrMissingField("projected.sources")
	case b.CSI != nil && b.CSI.Driver == "":
		// A CSI volume must name the (installed) driver to use.
		return apis.ErrMissingField("csi.driver")
	}
	return nil
}
// numSources returns the total number of volume sources that this
// WorkspaceBinding has been configured with.
func (b *WorkspaceBinding) numSources() int {
	count := 0
	for _, set := range []bool{
		b.VolumeClaimTemplate != nil,
		b.PersistentVolumeClaim != nil,
		b.EmptyDir != nil,
		b.ConfigMap != nil,
		b.Secret != nil,
		b.Projected != nil,
		b.CSI != nil,
	} {
		if set {
			count++
		}
	}
	return count
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
config "github.com/tektoncd/pipeline/pkg/apis/config"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1"
result "github.com/tektoncd/pipeline/pkg/result"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): deepcopy-gen output — do not hand-edit; regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Args) DeepCopyInto(out *Args) {
	{
		in := &in
		*out = make(Args, len(*in))
		copy(*out, *in)
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Args.
func (in Args) DeepCopy() Args {
	if in == nil {
		return nil
	}
	out := new(Args)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifact) DeepCopyInto(out *Artifact) {
	*out = *in
	if in.Values != nil {
		in, out := &in.Values, &out.Values
		*out = make([]ArtifactValue, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
func (in *Artifact) DeepCopy() *Artifact {
	if in == nil {
		return nil
	}
	out := new(Artifact)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ArtifactValue) DeepCopyInto(out *ArtifactValue) {
	*out = *in
	if in.Digest != nil {
		in, out := &in.Digest, &out.Digest
		*out = make(map[Algorithm]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactValue.
func (in *ArtifactValue) DeepCopy() *ArtifactValue {
	if in == nil {
		return nil
	}
	out := new(ArtifactValue)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifacts) DeepCopyInto(out *Artifacts) {
	*out = *in
	if in.Inputs != nil {
		in, out := &in.Inputs, &out.Inputs
		*out = make([]Artifact, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Outputs != nil {
		in, out := &in.Outputs, &out.Outputs
		*out = make([]Artifact, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifacts.
func (in *Artifacts) DeepCopy() *Artifacts {
	if in == nil {
		return nil
	}
	out := new(Artifacts)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): deepcopy-gen output — do not hand-edit; regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChildStatusReference) DeepCopyInto(out *ChildStatusReference) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.WhenExpressions != nil {
		in, out := &in.WhenExpressions, &out.WhenExpressions
		*out = make([]WhenExpression, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChildStatusReference.
func (in *ChildStatusReference) DeepCopy() *ChildStatusReference {
	if in == nil {
		return nil
	}
	out := new(ChildStatusReference)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudEventDelivery) DeepCopyInto(out *CloudEventDelivery) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventDelivery.
func (in *CloudEventDelivery) DeepCopy() *CloudEventDelivery {
	if in == nil {
		return nil
	}
	out := new(CloudEventDelivery)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudEventDeliveryState) DeepCopyInto(out *CloudEventDeliveryState) {
	*out = *in
	if in.SentAt != nil {
		in, out := &in.SentAt, &out.SentAt
		*out = (*in).DeepCopy()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventDeliveryState.
func (in *CloudEventDeliveryState) DeepCopy() *CloudEventDeliveryState {
	if in == nil {
		return nil
	}
	out := new(CloudEventDeliveryState)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Combination) DeepCopyInto(out *Combination) {
	{
		in := &in
		*out = make(Combination, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combination.
func (in Combination) DeepCopy() Combination {
	if in == nil {
		return nil
	}
	out := new(Combination)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Combinations) DeepCopyInto(out *Combinations) {
	{
		in := &in
		*out = make(Combinations, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = make(Combination, len(*in))
				for key, val := range *in {
					(*out)[key] = val
				}
			}
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Combinations.
func (in Combinations) DeepCopy() Combinations {
	if in == nil {
		return nil
	}
	out := new(Combinations)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigSource) DeepCopyInto(out *ConfigSource) {
	*out = *in
	if in.Digest != nil {
		in, out := &in.Digest, &out.Digest
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSource.
func (in *ConfigSource) DeepCopy() *ConfigSource {
	if in == nil {
		return nil
	}
	out := new(ConfigSource)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): deepcopy-gen output — do not hand-edit; regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomRun) DeepCopyInto(out *CustomRun) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRun.
func (in *CustomRun) DeepCopy() *CustomRun {
	if in == nil {
		return nil
	}
	out := new(CustomRun)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CustomRun) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomRunList) DeepCopyInto(out *CustomRunList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CustomRun, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunList.
func (in *CustomRunList) DeepCopy() *CustomRunList {
	if in == nil {
		return nil
	}
	out := new(CustomRunList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CustomRunList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomRunSpec) DeepCopyInto(out *CustomRunSpec) {
	*out = *in
	if in.CustomRef != nil {
		in, out := &in.CustomRef, &out.CustomRef
		*out = new(TaskRef)
		(*in).DeepCopyInto(*out)
	}
	if in.CustomSpec != nil {
		in, out := &in.CustomSpec, &out.CustomSpec
		*out = new(EmbeddedCustomRunSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(v1.Duration)
		**out = **in
	}
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]WorkspaceBinding, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunSpec.
func (in *CustomRunSpec) DeepCopy() *CustomRunSpec {
	if in == nil {
		return nil
	}
	out := new(CustomRunSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmbeddedCustomRunSpec) DeepCopyInto(out *EmbeddedCustomRunSpec) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Metadata.DeepCopyInto(&out.Metadata)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedCustomRunSpec.
func (in *EmbeddedCustomRunSpec) DeepCopy() *EmbeddedCustomRunSpec {
	if in == nil {
		return nil
	}
	out := new(EmbeddedCustomRunSpec)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): deepcopy-gen output — do not hand-edit; regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmbeddedTask) DeepCopyInto(out *EmbeddedTask) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Spec.DeepCopyInto(&out.Spec)
	in.Metadata.DeepCopyInto(&out.Metadata)
	in.TaskSpec.DeepCopyInto(&out.TaskSpec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedTask.
func (in *EmbeddedTask) DeepCopy() *EmbeddedTask {
	if in == nil {
		return nil
	}
	out := new(EmbeddedTask)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IncludeParams) DeepCopyInto(out *IncludeParams) {
	*out = *in
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParams.
func (in *IncludeParams) DeepCopy() *IncludeParams {
	if in == nil {
		return nil
	}
	out := new(IncludeParams)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in IncludeParamsList) DeepCopyInto(out *IncludeParamsList) {
	{
		in := &in
		*out = make(IncludeParamsList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParamsList.
func (in IncludeParamsList) DeepCopy() IncludeParamsList {
	if in == nil {
		return nil
	}
	out := new(IncludeParamsList)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InternalTaskModifier) DeepCopyInto(out *InternalTaskModifier) {
	*out = *in
	if in.StepsToPrepend != nil {
		in, out := &in.StepsToPrepend, &out.StepsToPrepend
		*out = make([]Step, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StepsToAppend != nil {
		in, out := &in.StepsToAppend, &out.StepsToAppend
		*out = make([]Step, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]corev1.Volume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalTaskModifier.
func (in *InternalTaskModifier) DeepCopy() *InternalTaskModifier {
	if in == nil {
		return nil
	}
	out := new(InternalTaskModifier)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Matrix) DeepCopyInto(out *Matrix) {
	*out = *in
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Include != nil {
		in, out := &in.Include, &out.Include
		*out = make(IncludeParamsList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Matrix.
func (in *Matrix) DeepCopy() *Matrix {
	if in == nil {
		return nil
	}
	out := new(Matrix)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): deepcopy-gen output — do not hand-edit; regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Param) DeepCopyInto(out *Param) {
	*out = *in
	in.Value.DeepCopyInto(&out.Value)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Param.
func (in *Param) DeepCopy() *Param {
	if in == nil {
		return nil
	}
	out := new(Param)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParamSpec) DeepCopyInto(out *ParamSpec) {
	*out = *in
	if in.Properties != nil {
		in, out := &in.Properties, &out.Properties
		*out = make(map[string]PropertySpec, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Default != nil {
		in, out := &in.Default, &out.Default
		*out = new(ParamValue)
		(*in).DeepCopyInto(*out)
	}
	if in.Enum != nil {
		in, out := &in.Enum, &out.Enum
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpec.
func (in *ParamSpec) DeepCopy() *ParamSpec {
	if in == nil {
		return nil
	}
	out := new(ParamSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ParamSpecs) DeepCopyInto(out *ParamSpecs) {
	{
		in := &in
		*out = make(ParamSpecs, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpecs.
func (in ParamSpecs) DeepCopy() ParamSpecs {
	if in == nil {
		return nil
	}
	out := new(ParamSpecs)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParamValue) DeepCopyInto(out *ParamValue) {
	*out = *in
	if in.ArrayVal != nil {
		in, out := &in.ArrayVal, &out.ArrayVal
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.ObjectVal != nil {
		in, out := &in.ObjectVal, &out.ObjectVal
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamValue.
func (in *ParamValue) DeepCopy() *ParamValue {
	if in == nil {
		return nil
	}
	out := new(ParamValue)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Params) DeepCopyInto(out *Params) {
	{
		in := &in
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Params.
func (in Params) DeepCopy() Params {
	if in == nil {
		return nil
	}
	out := new(Params)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pipeline) DeepCopyInto(out *Pipeline) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline.
func (in *Pipeline) DeepCopy() *Pipeline {
if in == nil {
return nil
}
out := new(Pipeline)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Pipeline) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineDeclaredResource) DeepCopyInto(out *PipelineDeclaredResource) {
	// All fields are plain values; the top-level assignment is a complete copy.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineDeclaredResource.
func (in *PipelineDeclaredResource) DeepCopy() *PipelineDeclaredResource {
	if in == nil {
		return nil
	}
	out := new(PipelineDeclaredResource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineList) DeepCopyInto(out *PipelineList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Shadow in/out with the slice fields so the element loop reads uniformly.
		in, out := &in.Items, &out.Items
		*out = make([]Pipeline, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList.
func (in *PipelineList) DeepCopy() *PipelineList {
	if in == nil {
		return nil
	}
	out := new(PipelineList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRef) DeepCopyInto(out *PipelineRef) {
	*out = *in
	in.ResolverRef.DeepCopyInto(&out.ResolverRef)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRef.
func (in *PipelineRef) DeepCopy() *PipelineRef {
	if in == nil {
		return nil
	}
	out := new(PipelineRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResourceBinding) DeepCopyInto(out *PipelineResourceBinding) {
	*out = *in
	if in.ResourceRef != nil {
		in, out := &in.ResourceRef, &out.ResourceRef
		*out = new(PipelineResourceRef)
		// PipelineResourceRef contains only value fields, so a dereferenced assignment suffices.
		**out = **in
	}
	if in.ResourceSpec != nil {
		in, out := &in.ResourceSpec, &out.ResourceSpec
		*out = new(v1alpha1.PipelineResourceSpec)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResourceBinding.
func (in *PipelineResourceBinding) DeepCopy() *PipelineResourceBinding {
	if in == nil {
		return nil
	}
	out := new(PipelineResourceBinding)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResourceRef) DeepCopyInto(out *PipelineResourceRef) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResourceRef.
func (in *PipelineResourceRef) DeepCopy() *PipelineResourceRef {
	if in == nil {
		return nil
	}
	out := new(PipelineResourceRef)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResult) DeepCopyInto(out *PipelineResult) {
	*out = *in
	in.Value.DeepCopyInto(&out.Value)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResult.
func (in *PipelineResult) DeepCopy() *PipelineResult {
	if in == nil {
		return nil
	}
	out := new(PipelineResult)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRun) DeepCopyInto(out *PipelineRun) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// ObjectMeta, Spec and Status all contain reference-typed fields and need real deep copies.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRun.
func (in *PipelineRun) DeepCopy() *PipelineRun {
	if in == nil {
		return nil
	}
	out := new(PipelineRun)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineRun) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRunList) DeepCopyInto(out *PipelineRunList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]PipelineRun, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunList.
func (in *PipelineRunList) DeepCopy() *PipelineRunList {
	if in == nil {
		return nil
	}
	out := new(PipelineRunList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineRunList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRunResult) DeepCopyInto(out *PipelineRunResult) {
	*out = *in
	in.Value.DeepCopyInto(&out.Value)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunResult.
func (in *PipelineRunResult) DeepCopy() *PipelineRunResult {
	if in == nil {
		return nil
	}
	out := new(PipelineRunResult)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRunRunStatus) DeepCopyInto(out *PipelineRunRunStatus) {
	*out = *in
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(runv1beta1.CustomRunStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.WhenExpressions != nil {
		in, out := &in.WhenExpressions, &out.WhenExpressions
		*out = make([]WhenExpression, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunRunStatus.
func (in *PipelineRunRunStatus) DeepCopy() *PipelineRunRunStatus {
	if in == nil {
		return nil
	}
	out := new(PipelineRunRunStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Each nil-guarded branch below handles one pointer/slice field; nil fields stay nil in the copy.
func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) {
	*out = *in
	if in.PipelineRef != nil {
		in, out := &in.PipelineRef, &out.PipelineRef
		*out = new(PipelineRef)
		(*in).DeepCopyInto(*out)
	}
	if in.PipelineSpec != nil {
		in, out := &in.PipelineSpec, &out.PipelineSpec
		*out = new(PipelineSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]PipelineResourceBinding, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Timeouts != nil {
		in, out := &in.Timeouts, &out.Timeouts
		*out = new(TimeoutFields)
		(*in).DeepCopyInto(*out)
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(v1.Duration)
		// metav1.Duration is a value type; dereferenced assignment is a full copy.
		**out = **in
	}
	if in.PodTemplate != nil {
		in, out := &in.PodTemplate, &out.PodTemplate
		*out = new(pod.Template)
		(*in).DeepCopyInto(*out)
	}
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]WorkspaceBinding, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.TaskRunSpecs != nil {
		in, out := &in.TaskRunSpecs, &out.TaskRunSpecs
		*out = make([]PipelineTaskRunSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ManagedBy != nil {
		in, out := &in.ManagedBy, &out.ManagedBy
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunSpec.
func (in *PipelineRunSpec) DeepCopy() *PipelineRunSpec {
	if in == nil {
		return nil
	}
	out := new(PipelineRunSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRunStatus) DeepCopyInto(out *PipelineRunStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	in.PipelineRunStatusFields.DeepCopyInto(&out.PipelineRunStatusFields)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunStatus.
func (in *PipelineRunStatus) DeepCopy() *PipelineRunStatus {
	if in == nil {
		return nil
	}
	out := new(PipelineRunStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) {
	*out = *in
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = (*in).DeepCopy()
	}
	if in.CompletionTime != nil {
		in, out := &in.CompletionTime, &out.CompletionTime
		*out = (*in).DeepCopy()
	}
	if in.TaskRuns != nil {
		in, out := &in.TaskRuns, &out.TaskRuns
		*out = make(map[string]*PipelineRunTaskRunStatus, len(*in))
		// Pointer-valued map entries: nil values are preserved as nil in the copy.
		for key, val := range *in {
			var outVal *PipelineRunTaskRunStatus
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = new(PipelineRunTaskRunStatus)
				(*in).DeepCopyInto(*out)
			}
			(*out)[key] = outVal
		}
	}
	if in.Runs != nil {
		in, out := &in.Runs, &out.Runs
		*out = make(map[string]*PipelineRunRunStatus, len(*in))
		for key, val := range *in {
			var outVal *PipelineRunRunStatus
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = new(PipelineRunRunStatus)
				(*in).DeepCopyInto(*out)
			}
			(*out)[key] = outVal
		}
	}
	if in.PipelineResults != nil {
		in, out := &in.PipelineResults, &out.PipelineResults
		*out = make([]PipelineRunResult, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PipelineSpec != nil {
		in, out := &in.PipelineSpec, &out.PipelineSpec
		*out = new(PipelineSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.SkippedTasks != nil {
		in, out := &in.SkippedTasks, &out.SkippedTasks
		*out = make([]SkippedTask, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ChildReferences != nil {
		in, out := &in.ChildReferences, &out.ChildReferences
		*out = make([]ChildStatusReference, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FinallyStartTime != nil {
		in, out := &in.FinallyStartTime, &out.FinallyStartTime
		*out = (*in).DeepCopy()
	}
	if in.Provenance != nil {
		in, out := &in.Provenance, &out.Provenance
		*out = new(Provenance)
		(*in).DeepCopyInto(*out)
	}
	if in.SpanContext != nil {
		in, out := &in.SpanContext, &out.SpanContext
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunStatusFields.
func (in *PipelineRunStatusFields) DeepCopy() *PipelineRunStatusFields {
	if in == nil {
		return nil
	}
	out := new(PipelineRunStatusFields)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineRunTaskRunStatus) DeepCopyInto(out *PipelineRunTaskRunStatus) {
	*out = *in
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(TaskRunStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.WhenExpressions != nil {
		in, out := &in.WhenExpressions, &out.WhenExpressions
		*out = make([]WhenExpression, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunTaskRunStatus.
func (in *PipelineRunTaskRunStatus) DeepCopy() *PipelineRunTaskRunStatus {
	if in == nil {
		return nil
	}
	out := new(PipelineRunTaskRunStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]PipelineDeclaredResource, len(*in))
		// Element type has only value fields, so a shallow copy of the slice is a deep copy.
		copy(*out, *in)
	}
	if in.Tasks != nil {
		in, out := &in.Tasks, &out.Tasks
		*out = make([]PipelineTask, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(ParamSpecs, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]PipelineWorkspaceDeclaration, len(*in))
		copy(*out, *in)
	}
	if in.Results != nil {
		in, out := &in.Results, &out.Results
		*out = make([]PipelineResult, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Finally != nil {
		in, out := &in.Finally, &out.Finally
		*out = make([]PipelineTask, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
func (in *PipelineSpec) DeepCopy() *PipelineSpec {
	if in == nil {
		return nil
	}
	out := new(PipelineSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTask) DeepCopyInto(out *PipelineTask) {
	*out = *in
	if in.TaskRef != nil {
		in, out := &in.TaskRef, &out.TaskRef
		*out = new(TaskRef)
		(*in).DeepCopyInto(*out)
	}
	if in.TaskSpec != nil {
		in, out := &in.TaskSpec, &out.TaskSpec
		*out = new(EmbeddedTask)
		(*in).DeepCopyInto(*out)
	}
	if in.WhenExpressions != nil {
		in, out := &in.WhenExpressions, &out.WhenExpressions
		*out = make(WhenExpressions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RunAfter != nil {
		in, out := &in.RunAfter, &out.RunAfter
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(PipelineTaskResources)
		(*in).DeepCopyInto(*out)
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Matrix != nil {
		in, out := &in.Matrix, &out.Matrix
		*out = new(Matrix)
		(*in).DeepCopyInto(*out)
	}
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]WorkspacePipelineTaskBinding, len(*in))
		copy(*out, *in)
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(v1.Duration)
		**out = **in
	}
	if in.PipelineRef != nil {
		in, out := &in.PipelineRef, &out.PipelineRef
		*out = new(PipelineRef)
		(*in).DeepCopyInto(*out)
	}
	if in.PipelineSpec != nil {
		in, out := &in.PipelineSpec, &out.PipelineSpec
		*out = new(PipelineSpec)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTask.
func (in *PipelineTask) DeepCopy() *PipelineTask {
	if in == nil {
		return nil
	}
	out := new(PipelineTask)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskInputResource) DeepCopyInto(out *PipelineTaskInputResource) {
	*out = *in
	if in.From != nil {
		in, out := &in.From, &out.From
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskInputResource.
func (in *PipelineTaskInputResource) DeepCopy() *PipelineTaskInputResource {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskInputResource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PipelineTaskList) DeepCopyInto(out *PipelineTaskList) {
	{
		// Value receiver: shadow in with a pointer so the loop uses the same (*in)[i] form.
		in := &in
		*out = make(PipelineTaskList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskList.
func (in PipelineTaskList) DeepCopy() PipelineTaskList {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskList)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskMetadata) DeepCopyInto(out *PipelineTaskMetadata) {
	*out = *in
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskMetadata.
func (in *PipelineTaskMetadata) DeepCopy() *PipelineTaskMetadata {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskMetadata)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskOutputResource) DeepCopyInto(out *PipelineTaskOutputResource) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskOutputResource.
func (in *PipelineTaskOutputResource) DeepCopy() *PipelineTaskOutputResource {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskOutputResource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskParam) DeepCopyInto(out *PipelineTaskParam) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskParam.
func (in *PipelineTaskParam) DeepCopy() *PipelineTaskParam {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskParam)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskResources) DeepCopyInto(out *PipelineTaskResources) {
	*out = *in
	if in.Inputs != nil {
		in, out := &in.Inputs, &out.Inputs
		*out = make([]PipelineTaskInputResource, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Outputs != nil {
		in, out := &in.Outputs, &out.Outputs
		*out = make([]PipelineTaskOutputResource, len(*in))
		// Output elements carry only value fields, so copy() is sufficient.
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskResources.
func (in *PipelineTaskResources) DeepCopy() *PipelineTaskResources {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskResources)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskRun) DeepCopyInto(out *PipelineTaskRun) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRun.
func (in *PipelineTaskRun) DeepCopy() *PipelineTaskRun {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskRun)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) {
	*out = *in
	if in.TaskPodTemplate != nil {
		in, out := &in.TaskPodTemplate, &out.TaskPodTemplate
		*out = new(pod.Template)
		(*in).DeepCopyInto(*out)
	}
	if in.StepOverrides != nil {
		in, out := &in.StepOverrides, &out.StepOverrides
		*out = make([]TaskRunStepOverride, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SidecarOverrides != nil {
		in, out := &in.SidecarOverrides, &out.SidecarOverrides
		*out = make([]TaskRunSidecarOverride, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Metadata != nil {
		in, out := &in.Metadata, &out.Metadata
		*out = new(PipelineTaskMetadata)
		(*in).DeepCopyInto(*out)
	}
	if in.ComputeResources != nil {
		in, out := &in.ComputeResources, &out.ComputeResources
		*out = new(corev1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(v1.Duration)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRunSpec.
func (in *PipelineTaskRunSpec) DeepCopy() *PipelineTaskRunSpec {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskRunSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineWorkspaceDeclaration) DeepCopyInto(out *PipelineWorkspaceDeclaration) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineWorkspaceDeclaration.
func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration {
	if in == nil {
		return nil
	}
	out := new(PipelineWorkspaceDeclaration)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PropertySpec) DeepCopyInto(out *PropertySpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec.
func (in *PropertySpec) DeepCopy() *PropertySpec {
	if in == nil {
		return nil
	}
	out := new(PropertySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Provenance) DeepCopyInto(out *Provenance) {
	*out = *in
	if in.ConfigSource != nil {
		in, out := &in.ConfigSource, &out.ConfigSource
		*out = new(ConfigSource)
		(*in).DeepCopyInto(*out)
	}
	if in.RefSource != nil {
		in, out := &in.RefSource, &out.RefSource
		*out = new(RefSource)
		(*in).DeepCopyInto(*out)
	}
	if in.FeatureFlags != nil {
		in, out := &in.FeatureFlags, &out.FeatureFlags
		*out = new(config.FeatureFlags)
		// FeatureFlags is a flat value struct; dereferenced assignment copies it fully.
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provenance.
func (in *Provenance) DeepCopy() *Provenance {
	if in == nil {
		return nil
	}
	out := new(Provenance)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ref) DeepCopyInto(out *Ref) {
	*out = *in
	in.ResolverRef.DeepCopyInto(&out.ResolverRef)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ref.
func (in *Ref) DeepCopy() *Ref {
	if in == nil {
		return nil
	}
	out := new(Ref)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RefSource) DeepCopyInto(out *RefSource) {
	*out = *in
	if in.Digest != nil {
		in, out := &in.Digest, &out.Digest
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefSource.
func (in *RefSource) DeepCopy() *RefSource {
	if in == nil {
		return nil
	}
	out := new(RefSource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolverRef) DeepCopyInto(out *ResolverRef) {
	*out = *in
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRef.
func (in *ResolverRef) DeepCopy() *ResolverRef {
	if in == nil {
		return nil
	}
	out := new(ResolverRef)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResultRef) DeepCopyInto(out *ResultRef) {
	*out = *in
	if in.ResultsIndex != nil {
		in, out := &in.ResultsIndex, &out.ResultsIndex
		*out = new(int)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultRef.
func (in *ResultRef) DeepCopy() *ResultRef {
	if in == nil {
		return nil
	}
	out := new(ResultRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in RetriesStatus) DeepCopyInto(out *RetriesStatus) {
	{
		// Value receiver: shadow in with a pointer so the element loop reads uniformly.
		in := &in
		*out = make(RetriesStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetriesStatus.
func (in RetriesStatus) DeepCopy() RetriesStatus {
	if in == nil {
		return nil
	}
	out := new(RetriesStatus)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Slices of value-only element types use copy(); element types containing pointers,
// maps or slices are copied element-by-element via their own DeepCopyInto.
func (in *Sidecar) DeepCopyInto(out *Sidecar) {
	*out = *in
	if in.Command != nil {
		in, out := &in.Command, &out.Command
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]corev1.ContainerPort, len(*in))
		copy(*out, *in)
	}
	if in.EnvFrom != nil {
		in, out := &in.EnvFrom, &out.EnvFrom
		*out = make([]corev1.EnvFromSource, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]corev1.EnvVar, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.Resources.DeepCopyInto(&out.Resources)
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]corev1.VolumeMount, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.VolumeDevices != nil {
		in, out := &in.VolumeDevices, &out.VolumeDevices
		*out = make([]corev1.VolumeDevice, len(*in))
		copy(*out, *in)
	}
	if in.LivenessProbe != nil {
		in, out := &in.LivenessProbe, &out.LivenessProbe
		*out = new(corev1.Probe)
		(*in).DeepCopyInto(*out)
	}
	if in.ReadinessProbe != nil {
		in, out := &in.ReadinessProbe, &out.ReadinessProbe
		*out = new(corev1.Probe)
		(*in).DeepCopyInto(*out)
	}
	if in.StartupProbe != nil {
		in, out := &in.StartupProbe, &out.StartupProbe
		*out = new(corev1.Probe)
		(*in).DeepCopyInto(*out)
	}
	if in.Lifecycle != nil {
		in, out := &in.Lifecycle, &out.Lifecycle
		*out = new(corev1.Lifecycle)
		(*in).DeepCopyInto(*out)
	}
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(corev1.SecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]WorkspaceUsage, len(*in))
		copy(*out, *in)
	}
	if in.RestartPolicy != nil {
		in, out := &in.RestartPolicy, &out.RestartPolicy
		*out = new(corev1.ContainerRestartPolicy)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar.
func (in *Sidecar) DeepCopy() *Sidecar {
	if in == nil {
		return nil
	}
	out := new(Sidecar)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarState) DeepCopyInto(out *SidecarState) {
	*out = *in
	in.ContainerState.DeepCopyInto(&out.ContainerState)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarState.
func (in *SidecarState) DeepCopy() *SidecarState {
	if in == nil {
		return nil
	}
	out := new(SidecarState)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SkippedTask) DeepCopyInto(out *SkippedTask) {
	*out = *in
	if in.WhenExpressions != nil {
		in, out := &in.WhenExpressions, &out.WhenExpressions
		*out = make([]WhenExpression, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkippedTask.
func (in *SkippedTask) DeepCopy() *SkippedTask {
	if in == nil {
		return nil
	}
	out := new(SkippedTask)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Fields prefixed "Deprecated" mirror retired corev1.Container fields kept for API compatibility.
func (in *Step) DeepCopyInto(out *Step) {
	*out = *in
	if in.Command != nil {
		in, out := &in.Command, &out.Command
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.DeprecatedPorts != nil {
		in, out := &in.DeprecatedPorts, &out.DeprecatedPorts
		*out = make([]corev1.ContainerPort, len(*in))
		copy(*out, *in)
	}
	if in.EnvFrom != nil {
		in, out := &in.EnvFrom, &out.EnvFrom
		*out = make([]corev1.EnvFromSource, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]corev1.EnvVar, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.Resources.DeepCopyInto(&out.Resources)
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]corev1.VolumeMount, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.VolumeDevices != nil {
		in, out := &in.VolumeDevices, &out.VolumeDevices
		*out = make([]corev1.VolumeDevice, len(*in))
		copy(*out, *in)
	}
	if in.DeprecatedLivenessProbe != nil {
		in, out := &in.DeprecatedLivenessProbe, &out.DeprecatedLivenessProbe
		*out = new(corev1.Probe)
		(*in).DeepCopyInto(*out)
	}
	if in.DeprecatedReadinessProbe != nil {
		in, out := &in.DeprecatedReadinessProbe, &out.DeprecatedReadinessProbe
		*out = new(corev1.Probe)
		(*in).DeepCopyInto(*out)
	}
	if in.DeprecatedStartupProbe != nil {
		in, out := &in.DeprecatedStartupProbe, &out.DeprecatedStartupProbe
		*out = new(corev1.Probe)
		(*in).DeepCopyInto(*out)
	}
	if in.DeprecatedLifecycle != nil {
		in, out := &in.DeprecatedLifecycle, &out.DeprecatedLifecycle
		*out = new(corev1.Lifecycle)
		(*in).DeepCopyInto(*out)
	}
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(corev1.SecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(v1.Duration)
		**out = **in
	}
	if in.Workspaces != nil {
		in, out := &in.Workspaces, &out.Workspaces
		*out = make([]WorkspaceUsage, len(*in))
		copy(*out, *in)
	}
	if in.StdoutConfig != nil {
		in, out := &in.StdoutConfig, &out.StdoutConfig
		*out = new(StepOutputConfig)
		**out = **in
	}
	if in.StderrConfig != nil {
		in, out := &in.StderrConfig, &out.StderrConfig
		*out = new(StepOutputConfig)
		**out = **in
	}
	if in.Ref != nil {
		in, out := &in.Ref, &out.Ref
		*out = new(Ref)
		(*in).DeepCopyInto(*out)
	}
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make(Params, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Results != nil {
		in, out := &in.Results, &out.Results
		*out = make([]pipelinev1.StepResult, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.When != nil {
		in, out := &in.When, &out.When
		*out = make(WhenExpressions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step.
func (in *Step) DeepCopy() *Step {
	if in == nil {
		return nil
	}
	out := new(Step)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepAction) DeepCopyInto(out *StepAction) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAction.
func (in *StepAction) DeepCopy() *StepAction {
	if in == nil {
		return nil
	}
	out := new(StepAction)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StepAction) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepActionList) DeepCopyInto(out *StepActionList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]StepAction, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepActionList.
func (in *StepActionList) DeepCopy() *StepActionList {
	if in == nil {
		return nil
	}
	out := new(StepActionList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StepActionList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepActionSpec) DeepCopyInto(out *StepActionSpec) {
*out = *in
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make(Args, len(*in))
copy(*out, *in)
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make(pipelinev1.ParamSpecs, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]pipelinev1.StepResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]corev1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepActionSpec.
func (in *StepActionSpec) DeepCopy() *StepActionSpec {
if in == nil {
return nil
}
out := new(StepActionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepOutputConfig) DeepCopyInto(out *StepOutputConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepOutputConfig.
func (in *StepOutputConfig) DeepCopy() *StepOutputConfig {
if in == nil {
return nil
}
out := new(StepOutputConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepState) DeepCopyInto(out *StepState) {
*out = *in
in.ContainerState.DeepCopyInto(&out.ContainerState)
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]TaskRunResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Provenance != nil {
in, out := &in.Provenance, &out.Provenance
*out = new(Provenance)
(*in).DeepCopyInto(*out)
}
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]Artifact, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Outputs != nil {
in, out := &in.Outputs, &out.Outputs
*out = make([]Artifact, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepState.
func (in *StepState) DeepCopy() *StepState {
if in == nil {
return nil
}
out := new(StepState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepTemplate) DeepCopyInto(out *StepTemplate) {
*out = *in
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DeprecatedPorts != nil {
in, out := &in.DeprecatedPorts, &out.DeprecatedPorts
*out = make([]corev1.ContainerPort, len(*in))
copy(*out, *in)
}
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]corev1.EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]corev1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeDevices != nil {
in, out := &in.VolumeDevices, &out.VolumeDevices
*out = make([]corev1.VolumeDevice, len(*in))
copy(*out, *in)
}
if in.DeprecatedLivenessProbe != nil {
in, out := &in.DeprecatedLivenessProbe, &out.DeprecatedLivenessProbe
*out = new(corev1.Probe)
(*in).DeepCopyInto(*out)
}
if in.DeprecatedReadinessProbe != nil {
in, out := &in.DeprecatedReadinessProbe, &out.DeprecatedReadinessProbe
*out = new(corev1.Probe)
(*in).DeepCopyInto(*out)
}
if in.DeprecatedStartupProbe != nil {
in, out := &in.DeprecatedStartupProbe, &out.DeprecatedStartupProbe
*out = new(corev1.Probe)
(*in).DeepCopyInto(*out)
}
if in.DeprecatedLifecycle != nil {
in, out := &in.DeprecatedLifecycle, &out.DeprecatedLifecycle
*out = new(corev1.Lifecycle)
(*in).DeepCopyInto(*out)
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepTemplate.
func (in *StepTemplate) DeepCopy() *StepTemplate {
if in == nil {
return nil
}
out := new(StepTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Task) DeepCopyInto(out *Task) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task.
func (in *Task) DeepCopy() *Task {
if in == nil {
return nil
}
out := new(Task)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Task) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskBreakpoints) DeepCopyInto(out *TaskBreakpoints) {
*out = *in
if in.BeforeSteps != nil {
in, out := &in.BeforeSteps, &out.BeforeSteps
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskBreakpoints.
func (in *TaskBreakpoints) DeepCopy() *TaskBreakpoints {
if in == nil {
return nil
}
out := new(TaskBreakpoints)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskList) DeepCopyInto(out *TaskList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Task, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList.
func (in *TaskList) DeepCopy() *TaskList {
if in == nil {
return nil
}
out := new(TaskList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TaskList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRef) DeepCopyInto(out *TaskRef) {
*out = *in
in.ResolverRef.DeepCopyInto(&out.ResolverRef)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRef.
func (in *TaskRef) DeepCopy() *TaskRef {
if in == nil {
return nil
}
out := new(TaskRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskResource) DeepCopyInto(out *TaskResource) {
*out = *in
out.ResourceDeclaration = in.ResourceDeclaration
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResource.
func (in *TaskResource) DeepCopy() *TaskResource {
if in == nil {
return nil
}
out := new(TaskResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskResourceBinding) DeepCopyInto(out *TaskResourceBinding) {
*out = *in
in.PipelineResourceBinding.DeepCopyInto(&out.PipelineResourceBinding)
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResourceBinding.
func (in *TaskResourceBinding) DeepCopy() *TaskResourceBinding {
if in == nil {
return nil
}
out := new(TaskResourceBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskResources) DeepCopyInto(out *TaskResources) {
*out = *in
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]TaskResource, len(*in))
copy(*out, *in)
}
if in.Outputs != nil {
in, out := &in.Outputs, &out.Outputs
*out = make([]TaskResource, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResources.
func (in *TaskResources) DeepCopy() *TaskResources {
if in == nil {
return nil
}
out := new(TaskResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskResult) DeepCopyInto(out *TaskResult) {
*out = *in
if in.Properties != nil {
in, out := &in.Properties, &out.Properties
*out = make(map[string]PropertySpec, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Value != nil {
in, out := &in.Value, &out.Value
*out = new(ParamValue)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResult.
func (in *TaskResult) DeepCopy() *TaskResult {
if in == nil {
return nil
}
out := new(TaskResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRun) DeepCopyInto(out *TaskRun) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRun.
func (in *TaskRun) DeepCopy() *TaskRun {
if in == nil {
return nil
}
out := new(TaskRun)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TaskRun) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunDebug) DeepCopyInto(out *TaskRunDebug) {
*out = *in
if in.Breakpoints != nil {
in, out := &in.Breakpoints, &out.Breakpoints
*out = new(TaskBreakpoints)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunDebug.
func (in *TaskRunDebug) DeepCopy() *TaskRunDebug {
if in == nil {
return nil
}
out := new(TaskRunDebug)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunInputs) DeepCopyInto(out *TaskRunInputs) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]TaskResourceBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make([]Param, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunInputs.
func (in *TaskRunInputs) DeepCopy() *TaskRunInputs {
if in == nil {
return nil
}
out := new(TaskRunInputs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunList) DeepCopyInto(out *TaskRunList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]TaskRun, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunList.
func (in *TaskRunList) DeepCopy() *TaskRunList {
if in == nil {
return nil
}
out := new(TaskRunList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TaskRunList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunOutputs) DeepCopyInto(out *TaskRunOutputs) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]TaskResourceBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunOutputs.
func (in *TaskRunOutputs) DeepCopy() *TaskRunOutputs {
if in == nil {
return nil
}
out := new(TaskRunOutputs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunResources) DeepCopyInto(out *TaskRunResources) {
*out = *in
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]TaskResourceBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Outputs != nil {
in, out := &in.Outputs, &out.Outputs
*out = make([]TaskResourceBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunResources.
func (in *TaskRunResources) DeepCopy() *TaskRunResources {
if in == nil {
return nil
}
out := new(TaskRunResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunResult) DeepCopyInto(out *TaskRunResult) {
*out = *in
in.Value.DeepCopyInto(&out.Value)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunResult.
func (in *TaskRunResult) DeepCopy() *TaskRunResult {
if in == nil {
return nil
}
out := new(TaskRunResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunSidecarOverride) DeepCopyInto(out *TaskRunSidecarOverride) {
*out = *in
in.Resources.DeepCopyInto(&out.Resources)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunSidecarOverride.
func (in *TaskRunSidecarOverride) DeepCopy() *TaskRunSidecarOverride {
if in == nil {
return nil
}
out := new(TaskRunSidecarOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) {
*out = *in
if in.Debug != nil {
in, out := &in.Debug, &out.Debug
*out = new(TaskRunDebug)
(*in).DeepCopyInto(*out)
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make(Params, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(TaskRunResources)
(*in).DeepCopyInto(*out)
}
if in.TaskRef != nil {
in, out := &in.TaskRef, &out.TaskRef
*out = new(TaskRef)
(*in).DeepCopyInto(*out)
}
if in.TaskSpec != nil {
in, out := &in.TaskSpec, &out.TaskSpec
*out = new(TaskSpec)
(*in).DeepCopyInto(*out)
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
if in.PodTemplate != nil {
in, out := &in.PodTemplate, &out.PodTemplate
*out = new(pod.Template)
(*in).DeepCopyInto(*out)
}
if in.Workspaces != nil {
in, out := &in.Workspaces, &out.Workspaces
*out = make([]WorkspaceBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.StepOverrides != nil {
in, out := &in.StepOverrides, &out.StepOverrides
*out = make([]TaskRunStepOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SidecarOverrides != nil {
in, out := &in.SidecarOverrides, &out.SidecarOverrides
*out = make([]TaskRunSidecarOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ComputeResources != nil {
in, out := &in.ComputeResources, &out.ComputeResources
*out = new(corev1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.ManagedBy != nil {
in, out := &in.ManagedBy, &out.ManagedBy
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunSpec.
func (in *TaskRunSpec) DeepCopy() *TaskRunSpec {
if in == nil {
return nil
}
out := new(TaskRunSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunStatus) DeepCopyInto(out *TaskRunStatus) {
*out = *in
in.Status.DeepCopyInto(&out.Status)
in.TaskRunStatusFields.DeepCopyInto(&out.TaskRunStatusFields)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStatus.
func (in *TaskRunStatus) DeepCopy() *TaskRunStatus {
if in == nil {
return nil
}
out := new(TaskRunStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) {
*out = *in
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.Steps != nil {
in, out := &in.Steps, &out.Steps
*out = make([]StepState, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CloudEvents != nil {
in, out := &in.CloudEvents, &out.CloudEvents
*out = make([]CloudEventDelivery, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RetriesStatus != nil {
in, out := &in.RetriesStatus, &out.RetriesStatus
*out = make(RetriesStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ResourcesResult != nil {
in, out := &in.ResourcesResult, &out.ResourcesResult
*out = make([]result.RunResult, len(*in))
copy(*out, *in)
}
if in.TaskRunResults != nil {
in, out := &in.TaskRunResults, &out.TaskRunResults
*out = make([]TaskRunResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Sidecars != nil {
in, out := &in.Sidecars, &out.Sidecars
*out = make([]SidecarState, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TaskSpec != nil {
in, out := &in.TaskSpec, &out.TaskSpec
*out = new(TaskSpec)
(*in).DeepCopyInto(*out)
}
if in.Provenance != nil {
in, out := &in.Provenance, &out.Provenance
*out = new(Provenance)
(*in).DeepCopyInto(*out)
}
if in.SpanContext != nil {
in, out := &in.SpanContext, &out.SpanContext
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStatusFields.
func (in *TaskRunStatusFields) DeepCopy() *TaskRunStatusFields {
if in == nil {
return nil
}
out := new(TaskRunStatusFields)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskRunStepOverride) DeepCopyInto(out *TaskRunStepOverride) {
*out = *in
in.Resources.DeepCopyInto(&out.Resources)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStepOverride.
func (in *TaskRunStepOverride) DeepCopy() *TaskRunStepOverride {
if in == nil {
return nil
}
out := new(TaskRunStepOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskSpec) DeepCopyInto(out *TaskSpec) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(TaskResources)
(*in).DeepCopyInto(*out)
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = make(ParamSpecs, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Steps != nil {
in, out := &in.Steps, &out.Steps
*out = make([]Step, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make(Volumes, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.StepTemplate != nil {
in, out := &in.StepTemplate, &out.StepTemplate
*out = new(StepTemplate)
(*in).DeepCopyInto(*out)
}
if in.Sidecars != nil {
in, out := &in.Sidecars, &out.Sidecars
*out = make([]Sidecar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Workspaces != nil {
in, out := &in.Workspaces, &out.Workspaces
*out = make([]WorkspaceDeclaration, len(*in))
copy(*out, *in)
}
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]TaskResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec.
func (in *TaskSpec) DeepCopy() *TaskSpec {
if in == nil {
return nil
}
out := new(TaskSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TimeoutFields) DeepCopyInto(out *TimeoutFields) {
*out = *in
if in.Pipeline != nil {
in, out := &in.Pipeline, &out.Pipeline
*out = new(v1.Duration)
**out = **in
}
if in.Tasks != nil {
in, out := &in.Tasks, &out.Tasks
*out = new(v1.Duration)
**out = **in
}
if in.Finally != nil {
in, out := &in.Finally, &out.Finally
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutFields.
func (in *TimeoutFields) DeepCopy() *TimeoutFields {
if in == nil {
return nil
}
out := new(TimeoutFields)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Volumes) DeepCopyInto(out *Volumes) {
{
in := &in
*out = make(Volumes, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volumes.
func (in Volumes) DeepCopy() Volumes {
if in == nil {
return nil
}
out := new(Volumes)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WhenExpression) DeepCopyInto(out *WhenExpression) {
*out = *in
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WhenExpression.
func (in *WhenExpression) DeepCopy() *WhenExpression {
if in == nil {
return nil
}
out := new(WhenExpression)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in WhenExpressions) DeepCopyInto(out *WhenExpressions) {
{
in := &in
*out = make(WhenExpressions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WhenExpressions.
func (in WhenExpressions) DeepCopy() WhenExpressions {
if in == nil {
return nil
}
out := new(WhenExpressions)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) {
*out = *in
if in.VolumeClaimTemplate != nil {
in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
*out = new(corev1.PersistentVolumeClaim)
(*in).DeepCopyInto(*out)
}
if in.PersistentVolumeClaim != nil {
in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
*out = new(corev1.PersistentVolumeClaimVolumeSource)
**out = **in
}
if in.EmptyDir != nil {
in, out := &in.EmptyDir, &out.EmptyDir
*out = new(corev1.EmptyDirVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.ConfigMap != nil {
in, out := &in.ConfigMap, &out.ConfigMap
*out = new(corev1.ConfigMapVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
*out = new(corev1.SecretVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Projected != nil {
in, out := &in.Projected, &out.Projected
*out = new(corev1.ProjectedVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.CSI != nil {
in, out := &in.CSI, &out.CSI
*out = new(corev1.CSIVolumeSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceBinding.
func (in *WorkspaceBinding) DeepCopy() *WorkspaceBinding {
if in == nil {
return nil
}
out := new(WorkspaceBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspaceDeclaration) DeepCopyInto(out *WorkspaceDeclaration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceDeclaration.
func (in *WorkspaceDeclaration) DeepCopy() *WorkspaceDeclaration {
if in == nil {
return nil
}
out := new(WorkspaceDeclaration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePipelineTaskBinding) DeepCopyInto(out *WorkspacePipelineTaskBinding) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePipelineTaskBinding.
func (in *WorkspacePipelineTaskBinding) DeepCopy() *WorkspacePipelineTaskBinding {
if in == nil {
return nil
}
out := new(WorkspacePipelineTaskBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspaceUsage) DeepCopyInto(out *WorkspaceUsage) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceUsage.
func (in *WorkspaceUsage) DeepCopy() *WorkspaceUsage {
if in == nil {
return nil
}
out := new(WorkspaceUsage)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"github.com/tektoncd/pipeline/pkg/apis/resolution"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: resolution.GroupName, Version: "v1alpha1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
// SchemeBuilder builds a scheme with the types known to the package.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme adds the types known to this package to an existing schema.
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers the ResolutionRequest and ResolutionRequestList
// types with the given scheme under SchemeGroupVersion, and also registers
// the standard metav1 types (ListOptions, GetOptions, etc.) for that
// group/version.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&ResolutionRequest{},
		&ResolutionRequestList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
	"context"
	"fmt"
	"sort"
	"strings"

	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
	"knative.dev/pkg/apis"
)
var _ apis.Convertible = (*ResolutionRequest)(nil)
// ConvertTo implements apis.Convertible. It converts this
// v1alpha1.ResolutionRequest into the provided sink, which must be a
// *v1beta1.ResolutionRequest; any other sink type is rejected with an error.
func (rr *ResolutionRequest) ConvertTo(ctx context.Context, sink apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	target, ok := sink.(*v1beta1.ResolutionRequest)
	if !ok {
		return fmt.Errorf("unknown version, got: %T", sink)
	}
	target.ObjectMeta = rr.ObjectMeta
	rr.Status.convertTo(ctx, &target.Status)
	return rr.Spec.ConvertTo(ctx, &target.Spec)
}
// ConvertTo converts a v1alpha1.ResolutionRequestSpec to a
// v1beta1.ResolutionRequestSpec. Each entry of the v1alpha1 string-map
// Parameters becomes a string-typed Param in the sink.
func (rrs *ResolutionRequestSpec) ConvertTo(ctx context.Context, sink *v1beta1.ResolutionRequestSpec) error {
	// Iterate parameter names in sorted order: Go map iteration order is
	// randomized, and appending directly while ranging over the map would
	// produce a differently-ordered Params slice on every conversion,
	// causing spurious diffs in the converted object.
	names := make([]string, 0, len(rrs.Parameters))
	for name := range rrs.Parameters {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		sink.Params = append(sink.Params, pipelinev1.Param{
			Name: name,
			Value: pipelinev1.ParamValue{
				Type:      pipelinev1.ParamTypeString,
				StringVal: rrs.Parameters[name],
			},
		})
	}
	return nil
}
// convertTo converts a v1alpha1.ResolutionRequestStatus to a
// v1beta1.ResolutionRequestStatus, deep-copying the RefSource (including its
// Digest map) when present.
func (rrs *ResolutionRequestStatus) convertTo(ctx context.Context, sink *v1beta1.ResolutionRequestStatus) {
	sink.Data = rrs.Data
	if rrs.RefSource == nil {
		return
	}
	// Copy the digest map so sink does not alias the source's map.
	digest := make(map[string]string, len(rrs.RefSource.Digest))
	for k, v := range rrs.RefSource.Digest {
		digest[k] = v
	}
	sink.RefSource = &pipelinev1.RefSource{
		URI:        rrs.RefSource.URI,
		EntryPoint: rrs.RefSource.EntryPoint,
		Digest:     digest,
	}
}
// ConvertFrom implements apis.Convertible. It populates this
// v1alpha1.ResolutionRequest from the provided source, which must be a
// *v1beta1.ResolutionRequest; any other source type is rejected with an error.
func (rr *ResolutionRequest) ConvertFrom(ctx context.Context, from apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	source, ok := from.(*v1beta1.ResolutionRequest)
	if !ok {
		return fmt.Errorf("unknown version, got: %T", from)
	}
	rr.ObjectMeta = source.ObjectMeta
	rr.Status.convertFrom(ctx, &source.Status)
	return rr.Spec.ConvertFrom(ctx, &source.Spec)
}
// ConvertFrom converts a v1beta1.ResolutionRequestSpec to a v1alpha1.ResolutionRequestSpec.
//
// v1alpha1 only supports string parameters, so any non-string Params make
// the conversion fail; the error lists every offending parameter name.
// NOTE(review): the v1beta1 URL field has no v1alpha1 counterpart and is
// silently dropped here — confirm that is intended.
func (rrs *ResolutionRequestSpec) ConvertFrom(ctx context.Context, from *v1beta1.ResolutionRequestSpec) error {
	var nonStringParams []string
	for _, p := range from.Params {
		if p.Value.Type != pipelinev1.ParamTypeString {
			nonStringParams = append(nonStringParams, p.Name)
			continue
		}
		if rrs.Parameters == nil {
			// Pre-size for the common all-strings case.
			rrs.Parameters = make(map[string]string, len(from.Params))
		}
		rrs.Parameters[p.Name] = p.Value.StringVal
	}
	if len(nonStringParams) > 0 {
		// Fixed: the message previously named the target version "v1alpha".
		return fmt.Errorf("cannot convert v1beta1 to v1alpha1, non-string type parameter(s) found: %s", strings.Join(nonStringParams, ", "))
	}
	return nil
}
// convertFrom converts a v1alpha1.ResolutionRequestStatus to a v1beta1.ResolutionRequestStatus
func (rrs *ResolutionRequestStatus) convertFrom(ctx context.Context, from *v1beta1.ResolutionRequestStatus) {
rrs.Data = from.Data
if from.RefSource != nil {
refSource := pipelinev1.RefSource{}
refSource.URI = from.RefSource.URI
refSource.EntryPoint = from.RefSource.EntryPoint
digest := make(map[string]string)
for k, v := range from.RefSource.Digest {
digest[k] = v
}
refSource.Digest = digest
rrs.RefSource = &refSource
} else if from.Source != nil {
refSource := pipelinev1.RefSource{}
refSource.URI = from.Source.URI
refSource.EntryPoint = from.Source.EntryPoint
digest := make(map[string]string)
for k, v := range from.Source.Digest {
digest[k] = v
}
refSource.Digest = digest
rrs.RefSource = &refSource
}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import "context"
// ManagedByLabelKey is the label key used to mark what is managing this resource.
const ManagedByLabelKey = "app.kubernetes.io/managed-by"

// SetDefaults walks a ResolutionRequest object and sets any default
// values that are required to be set before a reconciler sees it.
// Only fields that are currently empty are filled in.
func (rr *ResolutionRequest) SetDefaults(ctx context.Context) {
	tm := &rr.TypeMeta
	if tm.Kind == "" {
		tm.Kind = "ResolutionRequest"
	}
	if tm.APIVersion == "" {
		tm.APIVersion = "resolution.tekton.dev/v1alpha1"
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/apis"
)
// ResolutionRequests only have apis.ConditionSucceeded for now, so a
// batch condition set keyed on that single condition is sufficient.
var resolutionRequestCondSet = apis.NewBatchConditionSet()

// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*ResolutionRequest) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind("ResolutionRequest")
}

// GetConditionSet implements KRShaped.
func (*ResolutionRequest) GetConditionSet() apis.ConditionSet {
	return resolutionRequestCondSet
}
// HasStarted reports whether resolution is currently in progress: the
// Succeeded condition is still Unknown.
func (rr *ResolutionRequest) HasStarted() bool {
	return rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
}

// IsDone reports whether this ResolutionRequest has reached a terminal
// state, independent of success/failure: the Succeeded condition is no
// longer Unknown.
func (rr *ResolutionRequest) IsDone() bool {
	return !rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
}
// InitializeConditions sets the initial values of the conditions.
func (s *ResolutionRequestStatus) InitializeConditions() {
	resolutionRequestCondSet.Manage(s).InitializeConditions()
}

// MarkFailed sets the Succeeded condition to False with an accompanying
// reason and error message.
func (s *ResolutionRequestStatus) MarkFailed(reason, message string) {
	resolutionRequestCondSet.Manage(s).MarkFalse(apis.ConditionSucceeded, reason, message)
}

// MarkSucceeded sets the Succeeded condition to True.
func (s *ResolutionRequestStatus) MarkSucceeded() {
	resolutionRequestCondSet.Manage(s).MarkTrue(apis.ConditionSucceeded)
}

// MarkInProgress updates the Succeeded condition to Unknown with an
// accompanying message.
func (s *ResolutionRequestStatus) MarkInProgress(message string) {
	resolutionRequestCondSet.Manage(s).MarkUnknown(apis.ConditionSucceeded, resolutioncommon.ReasonResolutionInProgress, message)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResolutionRequest is an object for requesting the content of
// a Tekton resource like a pipeline.yaml.
//
// +genclient
// +genreconciler
type ResolutionRequest struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec holds the information for the request part of the resource request.
	// +optional
	Spec ResolutionRequestSpec `json:"spec,omitempty"`

	// Status communicates the state of the request and, ultimately,
	// the content of the resolved resource.
	// +optional
	Status ResolutionRequestStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResolutionRequestList is a list of ResolutionRequests.
type ResolutionRequestList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata"`
	Items           []ResolutionRequest `json:"items"`
}

// ResolutionRequestSpec are all the fields in the spec of the
// ResolutionRequest CRD.
type ResolutionRequestSpec struct {
	// Parameters are the runtime attributes passed to
	// the resolver to help it figure out how to resolve the
	// resource being requested. For example: repo URL, commit SHA,
	// path to file, the kind of authentication to leverage, etc.
	// Note: serialized under the "params" JSON key, matching the
	// v1beta1 Params field.
	// +optional
	Parameters map[string]string `json:"params,omitempty"`
}

// ResolutionRequestStatus are all the fields in a ResolutionRequest's
// status subresource.
type ResolutionRequestStatus struct {
	duckv1.Status                 `json:",inline"`
	ResolutionRequestStatusFields `json:",inline"`
}

// ResolutionRequestStatusFields are the ResolutionRequest-specific fields
// for the status subresource.
type ResolutionRequestStatusFields struct {
	// Data is a string representation of the resolved content
	// of the requested resource in-lined into the ResolutionRequest
	// object.
	Data string `json:"data"`
	// RefSource is the source reference of the remote data that records where the remote
	// file came from including the url, digest and the entrypoint.
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	RefSource *pipelinev1.RefSource `json:"refSource"`
}

// GetStatus implements KRShaped, exposing the embedded duck-typed Status.
func (rr *ResolutionRequest) GetStatus() *duckv1.Status {
	return &rr.Status.Status
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"github.com/tektoncd/pipeline/pkg/resolution/common"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
// Compile-time interface checks.
var (
	_ apis.Validatable              = (*ResolutionRequest)(nil)
	_ resourcesemantics.VerbLimited = (*ResolutionRequest)(nil)
)

// SupportedVerbs returns the operations that validation should be called for:
// only Create and Update are listed, so deletes bypass validation.
func (rr *ResolutionRequest) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{admissionregistrationv1.Create, admissionregistrationv1.Update}
}
// Validate checks that a submitted ResolutionRequest is structurally
// sound before the controller receives it: the resolver-type label must
// be present, and the spec must pass its own validation.
func (rr *ResolutionRequest) Validate(ctx context.Context) (errs *apis.FieldError) {
	errs = validateTypeLabel(rr)
	errs = errs.Also(rr.Spec.Validate(ctx).ViaField("spec"))
	return errs
}

// Validate checks the spec field of a ResolutionRequest is valid. There are
// currently no spec-level constraints, so every spec passes.
func (rs *ResolutionRequestSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	return nil
}
// validateTypeLabel verifies that the resolver-type label used to route
// this request to a resolver is present; the error is reported under
// meta.labels.
func validateTypeLabel(rr *ResolutionRequest) *apis.FieldError {
	if getTypeLabel(rr.ObjectMeta.Labels) == "" {
		return apis.ErrMissingField(common.LabelKeyResolverType).ViaField("labels").ViaField("meta")
	}
	return nil
}

// getTypeLabel returns the resolver-type label value, or "" when unset.
// Indexing a nil map yields the zero value, so no explicit nil check is
// needed.
func getTypeLabel(labels map[string]string) string {
	return labels[common.LabelKeyResolverType]
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequest) DeepCopyInto(out *ResolutionRequest) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequest.
func (in *ResolutionRequest) DeepCopy() *ResolutionRequest {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequest)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResolutionRequest) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestList) DeepCopyInto(out *ResolutionRequestList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Deep-copy each element so the copy does not alias the originals.
		in, out := &in.Items, &out.Items
		*out = make([]ResolutionRequest, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestList.
func (in *ResolutionRequestList) DeepCopy() *ResolutionRequestList {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResolutionRequestList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestSpec) DeepCopyInto(out *ResolutionRequestSpec) {
	*out = *in
	if in.Parameters != nil {
		// Copy the map entry-by-entry so the copy does not alias the original.
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestSpec.
func (in *ResolutionRequestSpec) DeepCopy() *ResolutionRequestSpec {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestStatus) DeepCopyInto(out *ResolutionRequestStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	in.ResolutionRequestStatusFields.DeepCopyInto(&out.ResolutionRequestStatusFields)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestStatus.
func (in *ResolutionRequestStatus) DeepCopy() *ResolutionRequestStatus {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestStatusFields) DeepCopyInto(out *ResolutionRequestStatusFields) {
	*out = *in
	if in.RefSource != nil {
		// RefSource is a pointer; allocate a fresh value and deep-copy into it.
		in, out := &in.RefSource, &out.RefSource
		*out = new(v1.RefSource)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestStatusFields.
func (in *ResolutionRequestStatusFields) DeepCopy() *ResolutionRequestStatusFields {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestStatusFields)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"github.com/tektoncd/pipeline/pkg/apis/resolution"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: resolution.GroupName, Version: "v1beta1"}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder builds a scheme with the types known to the package.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme adds the types known to this package to an existing schema.
	AddToScheme = SchemeBuilder.AddToScheme
)

// addKnownTypes adds the list of known types to Scheme. It always
// returns nil, matching the runtime.SchemeBuilder function signature.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&ResolutionRequest{},
		&ResolutionRequestList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"knative.dev/pkg/apis"
)
// Compile-time proof that ResolutionRequest satisfies apis.Convertible.
var _ apis.Convertible = (*ResolutionRequest)(nil)

// ConvertTo implements apis.Convertible. v1beta1 is the highest known
// version of ResolutionRequest, so any non-delete conversion request
// upward is an error.
func (rr *ResolutionRequest) ConvertTo(ctx context.Context, sink apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink)
}

// ConvertFrom implements apis.Convertible. As the highest known version,
// v1beta1 cannot be converted down from a newer one; any non-delete
// request is an error.
func (rr *ResolutionRequest) ConvertFrom(ctx context.Context, source apis.Convertible) error {
	if apis.IsInDelete(ctx) {
		return nil
	}
	return fmt.Errorf("v1beta1 is the highest known version, got: %T", source)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import "context"
// ManagedByLabelKey is the label key used to mark what is managing this resource.
const ManagedByLabelKey = "app.kubernetes.io/managed-by"

// SetDefaults walks a ResolutionRequest object and sets any default
// values that are required to be set before a reconciler sees it.
// Only fields that are currently empty are filled in.
func (rr *ResolutionRequest) SetDefaults(ctx context.Context) {
	tm := &rr.TypeMeta
	if tm.Kind == "" {
		tm.Kind = "ResolutionRequest"
	}
	if tm.APIVersion == "" {
		tm.APIVersion = "resolution.tekton.dev/v1beta1"
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/apis"
)
// ResolutionRequests only have apis.ConditionSucceeded for now, so a
// batch condition set keyed on that single condition is sufficient.
var resolutionRequestCondSet = apis.NewBatchConditionSet()

// GetGroupVersionKind implements kmeta.OwnerRefable.
func (*ResolutionRequest) GetGroupVersionKind() schema.GroupVersionKind {
	return SchemeGroupVersion.WithKind("ResolutionRequest")
}

// GetConditionSet implements KRShaped.
func (*ResolutionRequest) GetConditionSet() apis.ConditionSet {
	return resolutionRequestCondSet
}
// HasStarted reports whether resolution is currently in progress: the
// Succeeded condition is still Unknown.
func (rr *ResolutionRequest) HasStarted() bool {
	return rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
}

// IsDone reports whether this ResolutionRequest has reached a terminal
// state, independent of success/failure: the Succeeded condition is no
// longer Unknown.
func (rr *ResolutionRequest) IsDone() bool {
	return !rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
}
// InitializeConditions sets the initial values of the conditions.
func (s *ResolutionRequestStatus) InitializeConditions() {
	resolutionRequestCondSet.Manage(s).InitializeConditions()
}

// MarkFailed sets the Succeeded condition to False with an accompanying
// reason and error message.
func (s *ResolutionRequestStatus) MarkFailed(reason, message string) {
	resolutionRequestCondSet.Manage(s).MarkFalse(apis.ConditionSucceeded, reason, message)
}

// MarkSucceeded sets the Succeeded condition to True.
func (s *ResolutionRequestStatus) MarkSucceeded() {
	resolutionRequestCondSet.Manage(s).MarkTrue(apis.ConditionSucceeded)
}

// MarkInProgress updates the Succeeded condition to Unknown with an
// accompanying message.
func (s *ResolutionRequestStatus) MarkInProgress(message string) {
	resolutionRequestCondSet.Manage(s).MarkUnknown(apis.ConditionSucceeded, resolutioncommon.ReasonResolutionInProgress, message)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResolutionRequest is an object for requesting the content of
// a Tekton resource like a pipeline.yaml.
//
// +genclient
// +genreconciler
// +kubebuilder:storageversion
type ResolutionRequest struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec holds the information for the request part of the resource request.
	// +optional
	Spec ResolutionRequestSpec `json:"spec,omitempty"`

	// Status communicates the state of the request and, ultimately,
	// the content of the resolved resource.
	// +optional
	Status ResolutionRequestStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResolutionRequestList is a list of ResolutionRequests.
type ResolutionRequestList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata"`
	Items           []ResolutionRequest `json:"items"`
}

// ResolutionRequestSpec are all the fields in the spec of the
// ResolutionRequest CRD.
type ResolutionRequestSpec struct {
	// Parameters are the runtime attributes passed to
	// the resolver to help it figure out how to resolve the
	// resource being requested. For example: repo URL, commit SHA,
	// path to file, the kind of authentication to leverage, etc.
	// +optional
	// +listType=atomic
	Params []pipelinev1.Param `json:"params,omitempty"`

	// URL is the runtime url passed to the resolver
	// to help it figure out how to resolver the resource being
	// requested.
	// This is currently at an ALPHA stability level and subject to
	// alpha API compatibility policies.
	// +optional
	URL string `json:"url,omitempty"`
}

// ResolutionRequestStatus are all the fields in a ResolutionRequest's
// status subresource.
type ResolutionRequestStatus struct {
	duckv1.Status                 `json:",inline"`
	ResolutionRequestStatusFields `json:",inline"`
}

// ResolutionRequestStatusFields are the ResolutionRequest-specific fields
// for the status subresource.
type ResolutionRequestStatusFields struct {
	// Data is a string representation of the resolved content
	// of the requested resource in-lined into the ResolutionRequest
	// object.
	Data string `json:"data"`
	// Source holds the same information as RefSource.
	// Deprecated: Use RefSource instead
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	Source *pipelinev1.RefSource `json:"source"`
	// RefSource is the source reference of the remote data that records the url, digest
	// and the entrypoint.
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	RefSource *pipelinev1.RefSource `json:"refSource"`
}

// GetStatus implements KRShaped, exposing the embedded duck-typed Status.
func (rr *ResolutionRequest) GetStatus() *duckv1.Status {
	return &rr.Status.Status
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"github.com/tektoncd/pipeline/pkg/resolution/common"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
)
// Compile-time interface checks.
var (
	_ apis.Validatable              = (*ResolutionRequest)(nil)
	_ resourcesemantics.VerbLimited = (*ResolutionRequest)(nil)
)

// SupportedVerbs returns the operations that validation should be called for:
// only Create and Update are listed, so deletes bypass validation.
func (rr *ResolutionRequest) SupportedVerbs() []admissionregistrationv1.OperationType {
	return []admissionregistrationv1.OperationType{admissionregistrationv1.Create, admissionregistrationv1.Update}
}
// Validate checks that a submitted ResolutionRequest is structurally
// sound before the controller receives it: the resolver-type label must
// be present, and the spec must pass its own validation.
func (rr *ResolutionRequest) Validate(ctx context.Context) (errs *apis.FieldError) {
	errs = validateTypeLabel(rr)
	errs = errs.Also(rr.Spec.Validate(ctx).ViaField("spec"))
	return errs
}

// Validate checks the spec field of a ResolutionRequest is valid. There are
// currently no spec-level constraints, so every spec passes.
func (rs *ResolutionRequestSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
	return nil
}
// validateTypeLabel verifies that the resolver-type label used to route
// this request to a resolver is present; the error is reported under
// meta.labels.
func validateTypeLabel(rr *ResolutionRequest) *apis.FieldError {
	if getTypeLabel(rr.ObjectMeta.Labels) == "" {
		return apis.ErrMissingField(common.LabelKeyResolverType).ViaField("labels").ViaField("meta")
	}
	return nil
}

// getTypeLabel returns the resolver-type label value, or "" when unset.
// Indexing a nil map yields the zero value, so no explicit nil check is
// needed.
func getTypeLabel(labels map[string]string) string {
	return labels[common.LabelKeyResolverType]
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequest) DeepCopyInto(out *ResolutionRequest) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequest.
func (in *ResolutionRequest) DeepCopy() *ResolutionRequest {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequest)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResolutionRequest) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestList) DeepCopyInto(out *ResolutionRequestList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Deep-copy each element so the copy does not alias the originals.
		in, out := &in.Items, &out.Items
		*out = make([]ResolutionRequest, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestList.
func (in *ResolutionRequestList) DeepCopy() *ResolutionRequestList {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResolutionRequestList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestSpec) DeepCopyInto(out *ResolutionRequestSpec) {
	*out = *in
	if in.Params != nil {
		// Deep-copy each Param so the copy does not alias the originals.
		in, out := &in.Params, &out.Params
		*out = make([]v1.Param, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestSpec.
func (in *ResolutionRequestSpec) DeepCopy() *ResolutionRequestSpec {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestStatus) DeepCopyInto(out *ResolutionRequestStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	in.ResolutionRequestStatusFields.DeepCopyInto(&out.ResolutionRequestStatusFields)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestStatus.
func (in *ResolutionRequestStatus) DeepCopy() *ResolutionRequestStatus {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResolutionRequestStatusFields) DeepCopyInto(out *ResolutionRequestStatusFields) {
	*out = *in
	if in.Source != nil {
		// Source is a pointer; allocate a fresh value and deep-copy into it.
		in, out := &in.Source, &out.Source
		*out = new(v1.RefSource)
		(*in).DeepCopyInto(*out)
	}
	if in.RefSource != nil {
		// RefSource is a pointer; allocate a fresh value and deep-copy into it.
		in, out := &in.RefSource, &out.RefSource
		*out = new(v1.RefSource)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestStatusFields.
func (in *ResolutionRequestStatusFields) DeepCopy() *ResolutionRequestStatusFields {
	if in == nil {
		return nil
	}
	out := new(ResolutionRequestStatusFields)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects
// (group tekton.dev, version v1alpha1).
var SchemeGroupVersion = schema.GroupVersion{Group: pipeline.GroupName, Version: "v1alpha1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
func Kind(kind string) schema.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	// schemeBuilder collects the functions that register this package's types.
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme adds Build types to the scheme.
	AddToScheme = schemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	// Register the PipelineResource kinds under this package's group/version,
	// then register the shared meta kinds (ListOptions, etc.) for it.
	knownTypes := []runtime.Object{
		&PipelineResource{},
		&PipelineResourceList{},
	}
	scheme.AddKnownTypes(SchemeGroupVersion, knownTypes...)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResource) DeepCopyInto(out *PipelineResource) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	// Status contains no pointer fields, so a value copy of the struct suffices.
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(PipelineResourceStatus)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResource.
func (in *PipelineResource) DeepCopy() *PipelineResource {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(PipelineResource)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineResource) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil // avoid returning a typed-nil wrapped in the interface
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResourceList) DeepCopyInto(out *PipelineResourceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items need element-wise deep copies; each item holds reference fields.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]PipelineResource, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResourceList.
func (in *PipelineResourceList) DeepCopy() *PipelineResourceList {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(PipelineResourceList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineResourceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil // avoid returning a typed-nil wrapped in the interface
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResourceSpec) DeepCopyInto(out *PipelineResourceSpec) {
	*out = *in
	// Params/SecretParams elements are plain value structs, so copy() is a
	// sufficient deep copy once a fresh backing array is allocated.
	if in.Params != nil {
		in, out := &in.Params, &out.Params
		*out = make([]ResourceParam, len(*in))
		copy(*out, *in)
	}
	if in.SecretParams != nil {
		in, out := &in.SecretParams, &out.SecretParams
		*out = make([]SecretParam, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResourceSpec.
func (in *PipelineResourceSpec) DeepCopy() *PipelineResourceSpec {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(PipelineResourceSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineResourceStatus) DeepCopyInto(out *PipelineResourceStatus) {
	*out = *in // no reference fields: a value copy is a deep copy
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineResourceStatus.
func (in *PipelineResourceStatus) DeepCopy() *PipelineResourceStatus {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(PipelineResourceStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceDeclaration) DeepCopyInto(out *ResourceDeclaration) {
	*out = *in // no reference fields: a value copy is a deep copy
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeclaration.
func (in *ResourceDeclaration) DeepCopy() *ResourceDeclaration {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(ResourceDeclaration)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceParam) DeepCopyInto(out *ResourceParam) {
	*out = *in // no reference fields: a value copy is a deep copy
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceParam.
func (in *ResourceParam) DeepCopy() *ResourceParam {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(ResourceParam)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretParam) DeepCopyInto(out *SecretParam) {
	*out = *in // no reference fields: a value copy is a deep copy
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretParam.
func (in *SecretParam) DeepCopy() *SecretParam {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(SecretParam)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"encoding/json"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// This package exists to avoid an import cycle between v1alpha1 and v1beta1.
// It contains common definitions needed by v1alpha1.Run and v1beta1.PipelineRun.

// +k8s:deepcopy-gen=true

// RunStatus defines the observed state of Run
type RunStatus struct {
	duckv1.Status `json:",inline"`

	// RunStatusFields inlines the status fields.
	RunStatusFields `json:",inline"`
}
// +k8s:deepcopy-gen=true

// RunStatusFields holds the fields of Run's status. This is defined
// separately and inlined so that other types can readily consume these fields
// via duck typing.
type RunStatusFields struct {
	// StartTime is the time the build is actually started.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty"`

	// CompletionTime is the time the build completed.
	// +optional
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`

	// Results reports any output result values to be consumed by later
	// tasks in a pipeline.
	// +optional
	Results []RunResult `json:"results,omitempty"`

	// RetriesStatus contains the history of RunStatus, in case of a retry.
	// +optional
	RetriesStatus []RunStatus `json:"retriesStatus,omitempty"`

	// ExtraFields holds arbitrary fields provided by the custom task
	// controller. Stored as raw JSON; see Decode/EncodeExtraFields.
	ExtraFields runtime.RawExtension `json:"extraFields,omitempty"`
}
// RunResult used to describe the results of a task
type RunResult struct {
	// Name the given name
	Name string `json:"name"`

	// Value the given value of the result
	Value string `json:"value"`
}
var runCondSet = apis.NewBatchConditionSet()
// GetCondition returns the Condition matching the given type, or nil if absent.
func (r *RunStatus) GetCondition(t apis.ConditionType) *apis.Condition {
	mgr := runCondSet.Manage(r)
	return mgr.GetCondition(t)
}
// InitializeConditions will set all conditions in runCondSet to unknown for the Run
// and set the started time to the current time if not already set.
// (NOTE(review): previous comment said "PipelineRun"; the receiver is RunStatus.)
func (r *RunStatus) InitializeConditions() {
	started := false
	if r.StartTime.IsZero() {
		r.StartTime = &metav1.Time{Time: time.Now()}
		started = true
	}
	conditionManager := runCondSet.Manage(r)
	conditionManager.InitializeConditions()
	// Ensure the started reason is set for the "Succeeded" condition
	if started {
		initialCondition := conditionManager.GetCondition(apis.ConditionSucceeded)
		initialCondition.Reason = "Started"
		conditionManager.SetCondition(*initialCondition)
	}
}
// SetCondition sets the condition, unsetting previous conditions with the same
// type as necessary. A nil condition is a no-op.
func (r *RunStatus) SetCondition(newCond *apis.Condition) {
	if newCond == nil {
		return
	}
	runCondSet.Manage(r).SetCondition(*newCond)
}
// MarkRunSucceeded changes the Succeeded condition to True with the provided reason and message,
// and records the transition time as the completion time.
func (r *RunStatus) MarkRunSucceeded(reason, messageFormat string, messageA ...interface{}) {
	mgr := runCondSet.Manage(r)
	mgr.MarkTrueWithReason(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	succeeded := r.GetCondition(apis.ConditionSucceeded)
	r.CompletionTime = &succeeded.LastTransitionTime.Inner
}
// MarkRunFailed changes the Succeeded condition to False with the provided reason and message,
// and records the transition time as the completion time.
func (r *RunStatus) MarkRunFailed(reason, messageFormat string, messageA ...interface{}) {
	mgr := runCondSet.Manage(r)
	mgr.MarkFalse(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	succeeded := r.GetCondition(apis.ConditionSucceeded)
	r.CompletionTime = &succeeded.LastTransitionTime.Inner
}
// MarkRunRunning changes the Succeeded condition to Unknown with the provided reason and message.
func (r *RunStatus) MarkRunRunning(reason, messageFormat string, messageA ...interface{}) {
	mgr := runCondSet.Manage(r)
	mgr.MarkUnknown(apis.ConditionSucceeded, reason, messageFormat, messageA...)
}
// DecodeExtraFields deserializes the extra fields in the Run status into "into".
// An empty ExtraFields payload is treated as "nothing to decode" and returns nil.
func (r *RunStatus) DecodeExtraFields(into interface{}) error {
	raw := r.ExtraFields.Raw
	if len(raw) == 0 {
		return nil
	}
	return json.Unmarshal(raw, into)
}
// EncodeExtraFields serializes "from" as JSON and stores it in the Run status's
// ExtraFields, replacing any previous payload.
func (r *RunStatus) EncodeExtraFields(from interface{}) error {
	raw, err := json.Marshal(from)
	if err != nil {
		return err
	}
	r.ExtraFields = runtime.RawExtension{Raw: raw}
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunStatus) DeepCopyInto(out *RunStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	in.RunStatusFields.DeepCopyInto(&out.RunStatusFields)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunStatus.
func (in *RunStatus) DeepCopy() *RunStatus {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(RunStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunStatusFields) DeepCopyInto(out *RunStatusFields) {
	*out = *in
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = (*in).DeepCopy()
	}
	if in.CompletionTime != nil {
		in, out := &in.CompletionTime, &out.CompletionTime
		*out = (*in).DeepCopy()
	}
	// Results elements are value structs, so copy() suffices; RetriesStatus
	// entries contain reference fields and need element-wise deep copies.
	if in.Results != nil {
		in, out := &in.Results, &out.Results
		*out = make([]RunResult, len(*in))
		copy(*out, *in)
	}
	if in.RetriesStatus != nil {
		in, out := &in.RetriesStatus, &out.RetriesStatus
		*out = make([]RunStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.ExtraFields.DeepCopyInto(&out.ExtraFields)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunStatusFields.
func (in *RunStatusFields) DeepCopy() *RunStatusFields {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(RunStatusFields)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"encoding/json"
"time"
"github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// This package contains common definitions needed by v1beta1.CustomRun and v1beta1.PipelineRun.

// +k8s:deepcopy-gen=true

// CustomRunStatus defines the observed state of CustomRun
type CustomRunStatus struct {
	duckv1.Status `json:",inline"`

	// CustomRunStatusFields inlines the status fields.
	CustomRunStatusFields `json:",inline"`
}
// +k8s:deepcopy-gen=true

// CustomRunStatusFields holds the fields of CustomRun's status. This is defined
// separately and inlined so that other types can readily consume these fields
// via duck typing.
type CustomRunStatusFields struct {
	// StartTime is the time the build is actually started.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty"`

	// CompletionTime is the time the build completed.
	// +optional
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`

	// Results reports any output result values to be consumed by later
	// tasks in a pipeline.
	// +optional
	Results []CustomRunResult `json:"results,omitempty"`

	// RetriesStatus contains the history of CustomRunStatus, in case of a retry.
	// See CustomRun.status (API version: tekton.dev/v1beta1)
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	RetriesStatus []CustomRunStatus `json:"retriesStatus,omitempty"`

	// ExtraFields holds arbitrary fields provided by the custom task
	// controller. Stored as raw JSON; see Decode/EncodeExtraFields.
	// +kubebuilder:pruning:PreserveUnknownFields
	// +kubebuilder:validation:Schemaless
	ExtraFields runtime.RawExtension `json:"extraFields,omitempty"`
}
// CustomRunResult used to describe the results of a task
type CustomRunResult struct {
	// Name the given name
	Name string `json:"name"`

	// Value the given value of the result
	Value string `json:"value"`
}
var customRunCondSet = apis.NewBatchConditionSet()
// GetCondition returns the Condition matching the given type, or nil if absent.
func (r *CustomRunStatus) GetCondition(t apis.ConditionType) *apis.Condition {
	mgr := customRunCondSet.Manage(r)
	return mgr.GetCondition(t)
}
// InitializeConditions will set all conditions in customRunCondSet to unknown
// and set the started time to the current time if not already set.
func (r *CustomRunStatus) InitializeConditions() {
	mgr := customRunCondSet.Manage(r)
	justStarted := r.StartTime.IsZero()
	if justStarted {
		r.StartTime = &metav1.Time{Time: time.Now()}
	}
	mgr.InitializeConditions()
	if !justStarted {
		return
	}
	// Stamp the freshly-initialized "Succeeded" condition with the Started reason.
	cond := mgr.GetCondition(apis.ConditionSucceeded)
	cond.Reason = "Started"
	mgr.SetCondition(*cond)
}
// SetCondition sets the condition, unsetting previous conditions with the same
// type as necessary. A nil condition is a no-op.
func (r *CustomRunStatus) SetCondition(newCond *apis.Condition) {
	if newCond == nil {
		return
	}
	customRunCondSet.Manage(r).SetCondition(*newCond)
}
// MarkCustomRunSucceeded changes the Succeeded condition to True with the provided reason and message,
// and records the transition time as the completion time.
func (r *CustomRunStatus) MarkCustomRunSucceeded(reason, messageFormat string, messageA ...interface{}) {
	mgr := customRunCondSet.Manage(r)
	mgr.MarkTrueWithReason(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	succeeded := r.GetCondition(apis.ConditionSucceeded)
	r.CompletionTime = &succeeded.LastTransitionTime.Inner
}
// MarkCustomRunFailed changes the Succeeded condition to False with the provided reason and message,
// and records the transition time as the completion time.
func (r *CustomRunStatus) MarkCustomRunFailed(reason, messageFormat string, messageA ...interface{}) {
	mgr := customRunCondSet.Manage(r)
	mgr.MarkFalse(apis.ConditionSucceeded, reason, messageFormat, messageA...)
	succeeded := r.GetCondition(apis.ConditionSucceeded)
	r.CompletionTime = &succeeded.LastTransitionTime.Inner
}
// MarkCustomRunRunning changes the Succeeded condition to Unknown with the provided reason and message.
func (r *CustomRunStatus) MarkCustomRunRunning(reason, messageFormat string, messageA ...interface{}) {
	mgr := customRunCondSet.Manage(r)
	mgr.MarkUnknown(apis.ConditionSucceeded, reason, messageFormat, messageA...)
}
// DecodeExtraFields deserializes the extra fields in the CustomRun status into "into".
// An empty ExtraFields payload is treated as "nothing to decode" and returns nil.
func (r *CustomRunStatus) DecodeExtraFields(into interface{}) error {
	raw := r.ExtraFields.Raw
	if len(raw) == 0 {
		return nil
	}
	return json.Unmarshal(raw, into)
}
// EncodeExtraFields serializes "from" as JSON and stores it in the CustomRun status's
// ExtraFields, replacing any previous payload.
func (r *CustomRunStatus) EncodeExtraFields(from interface{}) error {
	raw, err := json.Marshal(from)
	if err != nil {
		return err
	}
	r.ExtraFields = runtime.RawExtension{Raw: raw}
	return nil
}
// FromRunStatus converts a v1alpha1.RunStatus into a corresponding v1beta1.CustomRunStatus.
// Results are converted element-wise; RetriesStatus entries are converted recursively,
// since each retry entry is itself a RunStatus. StartTime, CompletionTime and
// ExtraFields are carried over as-is (pointer/raw values are shared, not deep-copied,
// matching the original behavior).
func FromRunStatus(orig v1alpha1.RunStatus) CustomRunStatus {
	crs := CustomRunStatus{
		Status: orig.Status,
		CustomRunStatusFields: CustomRunStatusFields{
			StartTime:      orig.StartTime,
			CompletionTime: orig.CompletionTime,
			ExtraFields:    orig.ExtraFields,
		},
	}

	// Pre-size the destination slices to avoid repeated append growth, but keep
	// them nil when the source is empty so `omitempty` JSON output is unchanged.
	if n := len(orig.Results); n > 0 {
		crs.Results = make([]CustomRunResult, 0, n)
		for _, origRes := range orig.Results {
			crs.Results = append(crs.Results, CustomRunResult{
				Name:  origRes.Name,
				Value: origRes.Value,
			})
		}
	}
	if n := len(orig.RetriesStatus); n > 0 {
		crs.RetriesStatus = make([]CustomRunStatus, 0, n)
		for _, origRetryStatus := range orig.RetriesStatus {
			crs.RetriesStatus = append(crs.RetriesStatus, FromRunStatus(origRetryStatus))
		}
	}
	return crs
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomRunStatus) DeepCopyInto(out *CustomRunStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	in.CustomRunStatusFields.DeepCopyInto(&out.CustomRunStatusFields)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunStatus.
func (in *CustomRunStatus) DeepCopy() *CustomRunStatus {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(CustomRunStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomRunStatusFields) DeepCopyInto(out *CustomRunStatusFields) {
	*out = *in
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = (*in).DeepCopy()
	}
	if in.CompletionTime != nil {
		in, out := &in.CompletionTime, &out.CompletionTime
		*out = (*in).DeepCopy()
	}
	// Results elements are value structs, so copy() suffices.
	// NOTE(review): RetriesStatus is not deep-copied here, unlike the v1alpha1
	// RunStatusFields equivalent — presumably intentional for this generated
	// code, but worth confirming against the deepcopy-gen output upstream.
	if in.Results != nil {
		in, out := &in.Results, &out.Results
		*out = make([]CustomRunResult, len(*in))
		copy(*out, *in)
	}
	in.ExtraFields.DeepCopyInto(&out.ExtraFields)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunStatusFields.
func (in *CustomRunStatusFields) DeepCopy() *CustomRunStatusFields {
	if in == nil {
		return nil // nil in, nil out
	}
	out := new(CustomRunStatusFields)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validate
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
// MaxLength is the maximum length that an object's name can be (63, the DNS label limit).
const MaxLength = validation.DNS1123LabelMaxLength
// ObjectMetadata validates that the given object's name is a valid DNS name and isn't longer than the max length.
// The character-set check is reported first; the length check only fires for
// names that are otherwise valid.
func ObjectMetadata(meta metav1.Object) *apis.FieldError {
	name := meta.GetName()
	switch {
	case len(validation.IsDNS1123Subdomain(name)) > 0:
		return &apis.FieldError{
			Message: fmt.Sprintf("invalid resource name %q: must be a valid DNS label", name),
			Paths:   []string{"name"},
		}
	case len(name) > MaxLength:
		return &apis.FieldError{
			Message: "Invalid resource name: length must be no more than 63 characters",
			Paths:   []string{"name"},
		}
	default:
		return nil
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"encoding/json"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SerializeToMetadata serializes the input field and adds it as an annotation to
// the metadata under the input key, allocating the annotations map if needed.
func SerializeToMetadata(meta *metav1.ObjectMeta, field interface{}, key string) error {
	raw, err := json.Marshal(field)
	if err != nil {
		return fmt.Errorf("error serializing field: %w", err)
	}
	annotations := meta.Annotations
	if annotations == nil {
		annotations = map[string]string{}
		meta.Annotations = annotations
	}
	annotations[key] = string(raw)
	return nil
}
// DeserializeFromMetadata takes the value of the input key from the metadata's annotations,
// deserializes it into "to", and removes the key from the metadata's annotations.
// Returns nil if the key is not present in the annotations. An emptied
// annotations map is reset to nil.
func DeserializeFromMetadata(meta *metav1.ObjectMeta, to interface{}, key string) error {
	if meta == nil || meta.Annotations == nil {
		return nil
	}
	str, ok := meta.Annotations[key]
	if !ok {
		return nil
	}
	if err := json.Unmarshal([]byte(str), to); err != nil {
		return fmt.Errorf("error deserializing key %s from metadata: %w", key, err)
	}
	delete(meta.Annotations, key)
	if len(meta.Annotations) == 0 {
		meta.Annotations = nil
	}
	return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
fmt "fmt"
http "net/http"
tektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
tektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
// Interface is the set of typed Tekton clients provided by a Clientset,
// one accessor per served API version, plus discovery.
type Interface interface {
	Discovery() discovery.DiscoveryInterface
	TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface
	TektonV1beta1() tektonv1beta1.TektonV1beta1Interface
	TektonV1() tektonv1.TektonV1Interface
}
// Clientset contains the clients for groups.
type Clientset struct {
	// DiscoveryClient is embedded; it is returned by Discovery().
	*discovery.DiscoveryClient
	tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client
	tektonV1beta1  *tektonv1beta1.TektonV1beta1Client
	tektonV1       *tektonv1.TektonV1Client
}
// TektonV1alpha1 retrieves the TektonV1alpha1Client.
func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface {
	return c.tektonV1alpha1
}
// TektonV1beta1 retrieves the TektonV1beta1Client.
func (c *Clientset) TektonV1beta1() tektonv1beta1.TektonV1beta1Interface {
	return c.tektonV1beta1
}
// TektonV1 retrieves the TektonV1Client.
func (c *Clientset) TektonV1() tektonv1.TektonV1Interface {
	return c.tektonV1
}
// Discovery retrieves the DiscoveryClient. Safe to call on a nil receiver.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
	// Copy so the caller's config is never mutated.
	configShallowCopy := *c
	if configShallowCopy.UserAgent == "" {
		configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	// share the transport between all clients
	httpClient, err := rest.HTTPClientFor(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	return NewForConfigAndClient(&configShallowCopy, httpClient)
}
// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
	configShallowCopy := *c
	// A positive QPS without a RateLimiter means we must synthesize one,
	// which requires a positive Burst as well.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		if configShallowCopy.Burst <= 0 {
			return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
		}
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}

	var cs Clientset
	var err error
	cs.tektonV1alpha1, err = tektonv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.tektonV1beta1, err = tektonv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.tektonV1, err = tektonv1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	cs, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.tektonV1alpha1 = tektonv1alpha1.New(c)
	cs.tektonV1beta1 = tektonv1beta1.New(c)
	cs.tektonV1 = tektonv1.New(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
	return &cs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
tektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
faketektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
faketektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake"
tektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
faketektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
//
// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
// via --with-applyconfig).
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
	// Seed the tracker with the caller-provided objects.
	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}

	cs := &Clientset{tracker: o}
	cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
	// Route all object actions through the tracker, and back watches with it too.
	cs.AddReactor("*", "*", testing.ObjectReaction(o))
	cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
		gvr := action.GetResource()
		ns := action.GetNamespace()
		watch, err := o.Watch(gvr, ns)
		if err != nil {
			return false, nil, err
		}
		return true, watch, nil
	})

	return cs
}
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
	testing.Fake
	discovery *fakediscovery.FakeDiscovery
	tracker   testing.ObjectTracker
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}
var (
_ clientset.Interface = &Clientset{}
_ testing.FakeClient = &Clientset{}
)
// TektonV1alpha1 retrieves the TektonV1alpha1Client
func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface {
return &faketektonv1alpha1.FakeTektonV1alpha1{Fake: &c.Fake}
}
// TektonV1beta1 retrieves the TektonV1beta1Client
func (c *Clientset) TektonV1beta1() tektonv1beta1.TektonV1beta1Interface {
return &faketektonv1beta1.FakeTektonV1beta1{Fake: &c.Fake}
}
// TektonV1 retrieves the TektonV1Client
func (c *Clientset) TektonV1() tektonv1.TektonV1Interface {
return &faketektonv1.FakeTektonV1{Fake: &c.Fake}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// scheme holds the type registrations used by this fake clientset's codecs
// and object tracker.
var scheme = runtime.NewScheme()

// codecs provides the decoder used to seed the fake object tracker.
var codecs = serializer.NewCodecFactory(scheme)

// localSchemeBuilder registers every Tekton API group/version served by
// this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
	tektonv1alpha1.AddToScheme,
	tektonv1beta1.AddToScheme,
	tektonv1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//	import (
//	  "k8s.io/client-go/kubernetes"
//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//	)
//
//	kclientset, _ := kubernetes.NewForConfig(c)
//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

// init registers the meta/v1 types and all Tekton types into the package
// scheme. Must panics on registration conflicts, which are programmer errors.
func init() {
	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(scheme))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// Scheme is the runtime scheme into which all client types below are registered.
var Scheme = runtime.NewScheme()

// Codecs provides serializers and deserializers for Scheme's types.
var Codecs = serializer.NewCodecFactory(Scheme)

// ParameterCodec handles conversion of URL query parameters for Scheme's types.
var ParameterCodec = runtime.NewParameterCodec(Scheme)

// localSchemeBuilder registers every Tekton API group/version served by
// this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
	tektonv1alpha1.AddToScheme,
	tektonv1beta1.AddToScheme,
	tektonv1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//	import (
//	  "k8s.io/client-go/kubernetes"
//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//	)
//
//	kclientset, _ := kubernetes.NewForConfig(c)
//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

// init registers the meta/v1 types and all Tekton types into the exported
// Scheme. Must panics on registration conflicts, which are programmer errors.
func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(Scheme))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
gentype "k8s.io/client-go/gentype"
)
// fakePipelines implements PipelineInterface
type fakePipelines struct {
	// FakeClientWithList supplies the generic CRUD/list/watch fake behavior.
	*gentype.FakeClientWithList[*v1.Pipeline, *v1.PipelineList]
	// Fake records every action performed against this client.
	Fake *FakeTektonV1
}

// newFakePipelines returns a fake namespaced Pipeline client wired to the
// shared action recorder of the parent FakeTektonV1.
func newFakePipelines(fake *FakeTektonV1, namespace string) pipelinev1.PipelineInterface {
	return &fakePipelines{
		gentype.NewFakeClientWithList[*v1.Pipeline, *v1.PipelineList](
			fake.Fake,
			namespace,
			v1.SchemeGroupVersion.WithResource("pipelines"),
			v1.SchemeGroupVersion.WithKind("Pipeline"),
			func() *v1.Pipeline { return &v1.Pipeline{} },
			func() *v1.PipelineList { return &v1.PipelineList{} },
			// Copy list metadata from one list to another.
			func(dst, src *v1.PipelineList) { dst.ListMeta = src.ListMeta },
			// Convert list items to and from pointer slices for the generic fake.
			func(list *v1.PipelineList) []*v1.Pipeline { return gentype.ToPointerSlice(list.Items) },
			func(list *v1.PipelineList, items []*v1.Pipeline) { list.Items = gentype.FromPointerSlice(items) },
		),
		fake,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeTektonV1 implements TektonV1Interface by recording actions on the
// embedded testing.Fake instead of contacting an API server.
type FakeTektonV1 struct {
	*testing.Fake
}

// Pipelines returns a fake client for Pipeline resources in the given namespace.
func (c *FakeTektonV1) Pipelines(namespace string) v1.PipelineInterface {
	return newFakePipelines(c, namespace)
}

// PipelineRuns returns a fake client for PipelineRun resources in the given namespace.
func (c *FakeTektonV1) PipelineRuns(namespace string) v1.PipelineRunInterface {
	return newFakePipelineRuns(c, namespace)
}

// Tasks returns a fake client for Task resources in the given namespace.
func (c *FakeTektonV1) Tasks(namespace string) v1.TaskInterface {
	return newFakeTasks(c, namespace)
}

// TaskRuns returns a fake client for TaskRun resources in the given namespace.
func (c *FakeTektonV1) TaskRuns(namespace string) v1.TaskRunInterface {
	return newFakeTaskRuns(c, namespace)
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeTektonV1) RESTClient() rest.Interface {
	// Fakes have no transport; callers receive a typed nil RESTClient.
	var ret *rest.RESTClient
	return ret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
gentype "k8s.io/client-go/gentype"
)
// fakePipelineRuns implements PipelineRunInterface
type fakePipelineRuns struct {
	// FakeClientWithList supplies the generic CRUD/list/watch fake behavior.
	*gentype.FakeClientWithList[*v1.PipelineRun, *v1.PipelineRunList]
	// Fake records every action performed against this client.
	Fake *FakeTektonV1
}

// newFakePipelineRuns returns a fake namespaced PipelineRun client wired to
// the shared action recorder of the parent FakeTektonV1.
func newFakePipelineRuns(fake *FakeTektonV1, namespace string) pipelinev1.PipelineRunInterface {
	return &fakePipelineRuns{
		gentype.NewFakeClientWithList[*v1.PipelineRun, *v1.PipelineRunList](
			fake.Fake,
			namespace,
			v1.SchemeGroupVersion.WithResource("pipelineruns"),
			v1.SchemeGroupVersion.WithKind("PipelineRun"),
			func() *v1.PipelineRun { return &v1.PipelineRun{} },
			func() *v1.PipelineRunList { return &v1.PipelineRunList{} },
			// Copy list metadata from one list to another.
			func(dst, src *v1.PipelineRunList) { dst.ListMeta = src.ListMeta },
			// Convert list items to and from pointer slices for the generic fake.
			func(list *v1.PipelineRunList) []*v1.PipelineRun { return gentype.ToPointerSlice(list.Items) },
			func(list *v1.PipelineRunList, items []*v1.PipelineRun) { list.Items = gentype.FromPointerSlice(items) },
		),
		fake,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
gentype "k8s.io/client-go/gentype"
)
// fakeTasks implements TaskInterface
type fakeTasks struct {
	// FakeClientWithList supplies the generic CRUD/list/watch fake behavior.
	*gentype.FakeClientWithList[*v1.Task, *v1.TaskList]
	// Fake records every action performed against this client.
	Fake *FakeTektonV1
}

// newFakeTasks returns a fake namespaced Task client wired to the shared
// action recorder of the parent FakeTektonV1.
func newFakeTasks(fake *FakeTektonV1, namespace string) pipelinev1.TaskInterface {
	return &fakeTasks{
		gentype.NewFakeClientWithList[*v1.Task, *v1.TaskList](
			fake.Fake,
			namespace,
			v1.SchemeGroupVersion.WithResource("tasks"),
			v1.SchemeGroupVersion.WithKind("Task"),
			func() *v1.Task { return &v1.Task{} },
			func() *v1.TaskList { return &v1.TaskList{} },
			// Copy list metadata from one list to another.
			func(dst, src *v1.TaskList) { dst.ListMeta = src.ListMeta },
			// Convert list items to and from pointer slices for the generic fake.
			func(list *v1.TaskList) []*v1.Task { return gentype.ToPointerSlice(list.Items) },
			func(list *v1.TaskList, items []*v1.Task) { list.Items = gentype.FromPointerSlice(items) },
		),
		fake,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
gentype "k8s.io/client-go/gentype"
)
// fakeTaskRuns implements TaskRunInterface
type fakeTaskRuns struct {
	// FakeClientWithList supplies the generic CRUD/list/watch fake behavior.
	*gentype.FakeClientWithList[*v1.TaskRun, *v1.TaskRunList]
	// Fake records every action performed against this client.
	Fake *FakeTektonV1
}

// newFakeTaskRuns returns a fake namespaced TaskRun client wired to the
// shared action recorder of the parent FakeTektonV1.
func newFakeTaskRuns(fake *FakeTektonV1, namespace string) pipelinev1.TaskRunInterface {
	return &fakeTaskRuns{
		gentype.NewFakeClientWithList[*v1.TaskRun, *v1.TaskRunList](
			fake.Fake,
			namespace,
			v1.SchemeGroupVersion.WithResource("taskruns"),
			v1.SchemeGroupVersion.WithKind("TaskRun"),
			func() *v1.TaskRun { return &v1.TaskRun{} },
			func() *v1.TaskRunList { return &v1.TaskRunList{} },
			// Copy list metadata from one list to another.
			func(dst, src *v1.TaskRunList) { dst.ListMeta = src.ListMeta },
			// Convert list items to and from pointer slices for the generic fake.
			func(list *v1.TaskRunList) []*v1.TaskRun { return gentype.ToPointerSlice(list.Items) },
			func(list *v1.TaskRunList, items []*v1.TaskRun) { list.Items = gentype.FromPointerSlice(items) },
		),
		fake,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// PipelinesGetter has a method to return a PipelineInterface.
// A group's client should implement this interface.
type PipelinesGetter interface {
	Pipelines(namespace string) PipelineInterface
}

// PipelineInterface has methods to work with Pipeline resources.
type PipelineInterface interface {
	Create(ctx context.Context, pipeline *pipelinev1.Pipeline, opts metav1.CreateOptions) (*pipelinev1.Pipeline, error)
	Update(ctx context.Context, pipeline *pipelinev1.Pipeline, opts metav1.UpdateOptions) (*pipelinev1.Pipeline, error)
	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
	Get(ctx context.Context, name string, opts metav1.GetOptions) (*pipelinev1.Pipeline, error)
	List(ctx context.Context, opts metav1.ListOptions) (*pipelinev1.PipelineList, error)
	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *pipelinev1.Pipeline, err error)
	// PipelineExpansion allows hand-written extension methods to be added
	// alongside the generated ones.
	PipelineExpansion
}

// pipelines implements PipelineInterface
type pipelines struct {
	// ClientWithList supplies the generic REST CRUD/list/watch implementation.
	*gentype.ClientWithList[*pipelinev1.Pipeline, *pipelinev1.PipelineList]
}

// newPipelines returns a Pipelines
func newPipelines(c *TektonV1Client, namespace string) *pipelines {
	return &pipelines{
		gentype.NewClientWithList[*pipelinev1.Pipeline, *pipelinev1.PipelineList](
			"pipelines",
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace,
			func() *pipelinev1.Pipeline { return &pipelinev1.Pipeline{} },
			func() *pipelinev1.PipelineList { return &pipelinev1.PipelineList{} },
		),
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
http "net/http"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// TektonV1Interface exposes typed clients for every resource in the
// tekton.dev/v1 API group.
type TektonV1Interface interface {
	RESTClient() rest.Interface
	PipelinesGetter
	PipelineRunsGetter
	TasksGetter
	TaskRunsGetter
}

// TektonV1Client is used to interact with features provided by the tekton.dev group.
type TektonV1Client struct {
	restClient rest.Interface
}

// Pipelines returns a client for Pipeline resources in the given namespace.
func (c *TektonV1Client) Pipelines(namespace string) PipelineInterface {
	return newPipelines(c, namespace)
}

// PipelineRuns returns a client for PipelineRun resources in the given namespace.
func (c *TektonV1Client) PipelineRuns(namespace string) PipelineRunInterface {
	return newPipelineRuns(c, namespace)
}

// Tasks returns a client for Task resources in the given namespace.
func (c *TektonV1Client) Tasks(namespace string) TaskInterface {
	return newTasks(c, namespace)
}

// TaskRuns returns a client for TaskRun resources in the given namespace.
func (c *TektonV1Client) TaskRuns(namespace string) TaskRunInterface {
	return newTaskRuns(c, namespace)
}

// NewForConfig creates a new TektonV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*TektonV1Client, error) {
	// Work on a copy so the caller's config is never mutated.
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	httpClient, err := rest.HTTPClientFor(&config)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&config, httpClient)
}

// NewForConfigAndClient creates a new TektonV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*TektonV1Client, error) {
	// Work on a copy so the caller's config is never mutated.
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientForConfigAndClient(&config, h)
	if err != nil {
		return nil, err
	}
	return &TektonV1Client{client}, nil
}

// NewForConfigOrDie creates a new TektonV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *TektonV1Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// New creates a new TektonV1Client for the given RESTClient.
func New(c rest.Interface) *TektonV1Client {
	return &TektonV1Client{c}
}

// setConfigDefaults fills in the group/version, API path, serializer, and
// user agent a TektonV1Client needs on the given rest.Config.
func setConfigDefaults(config *rest.Config) error {
	gv := pipelinev1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *TektonV1Client) RESTClient() rest.Interface {
	// Return an untyped nil so callers comparing against nil behave as expected.
	if c == nil {
		return nil
	}
	return c.restClient
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// PipelineRunsGetter has a method to return a PipelineRunInterface.
// A group's client should implement this interface.
type PipelineRunsGetter interface {
	PipelineRuns(namespace string) PipelineRunInterface
}

// PipelineRunInterface has methods to work with PipelineRun resources.
type PipelineRunInterface interface {
	Create(ctx context.Context, pipelineRun *pipelinev1.PipelineRun, opts metav1.CreateOptions) (*pipelinev1.PipelineRun, error)
	Update(ctx context.Context, pipelineRun *pipelinev1.PipelineRun, opts metav1.UpdateOptions) (*pipelinev1.PipelineRun, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, pipelineRun *pipelinev1.PipelineRun, opts metav1.UpdateOptions) (*pipelinev1.PipelineRun, error)
	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
	Get(ctx context.Context, name string, opts metav1.GetOptions) (*pipelinev1.PipelineRun, error)
	List(ctx context.Context, opts metav1.ListOptions) (*pipelinev1.PipelineRunList, error)
	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *pipelinev1.PipelineRun, err error)
	// PipelineRunExpansion allows hand-written extension methods to be added
	// alongside the generated ones.
	PipelineRunExpansion
}

// pipelineRuns implements PipelineRunInterface
type pipelineRuns struct {
	// ClientWithList supplies the generic REST CRUD/list/watch implementation.
	*gentype.ClientWithList[*pipelinev1.PipelineRun, *pipelinev1.PipelineRunList]
}

// newPipelineRuns returns a PipelineRuns
func newPipelineRuns(c *TektonV1Client, namespace string) *pipelineRuns {
	return &pipelineRuns{
		gentype.NewClientWithList[*pipelinev1.PipelineRun, *pipelinev1.PipelineRunList](
			"pipelineruns",
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace,
			func() *pipelinev1.PipelineRun { return &pipelinev1.PipelineRun{} },
			func() *pipelinev1.PipelineRunList { return &pipelinev1.PipelineRunList{} },
		),
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// TasksGetter has a method to return a TaskInterface.
// A group's client should implement this interface.
type TasksGetter interface {
	Tasks(namespace string) TaskInterface
}

// TaskInterface has methods to work with Task resources.
type TaskInterface interface {
	Create(ctx context.Context, task *pipelinev1.Task, opts metav1.CreateOptions) (*pipelinev1.Task, error)
	Update(ctx context.Context, task *pipelinev1.Task, opts metav1.UpdateOptions) (*pipelinev1.Task, error)
	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
	Get(ctx context.Context, name string, opts metav1.GetOptions) (*pipelinev1.Task, error)
	List(ctx context.Context, opts metav1.ListOptions) (*pipelinev1.TaskList, error)
	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *pipelinev1.Task, err error)
	// TaskExpansion allows hand-written extension methods to be added
	// alongside the generated ones.
	TaskExpansion
}

// tasks implements TaskInterface
type tasks struct {
	// ClientWithList supplies the generic REST CRUD/list/watch implementation.
	*gentype.ClientWithList[*pipelinev1.Task, *pipelinev1.TaskList]
}

// newTasks returns a Tasks
func newTasks(c *TektonV1Client, namespace string) *tasks {
	return &tasks{
		gentype.NewClientWithList[*pipelinev1.Task, *pipelinev1.TaskList](
			"tasks",
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace,
			func() *pipelinev1.Task { return &pipelinev1.Task{} },
			func() *pipelinev1.TaskList { return &pipelinev1.TaskList{} },
		),
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// TaskRunsGetter has a method to return a TaskRunInterface.
// A group's client should implement this interface.
type TaskRunsGetter interface {
	TaskRuns(namespace string) TaskRunInterface
}

// TaskRunInterface has methods to work with TaskRun resources.
type TaskRunInterface interface {
	Create(ctx context.Context, taskRun *pipelinev1.TaskRun, opts metav1.CreateOptions) (*pipelinev1.TaskRun, error)
	Update(ctx context.Context, taskRun *pipelinev1.TaskRun, opts metav1.UpdateOptions) (*pipelinev1.TaskRun, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, taskRun *pipelinev1.TaskRun, opts metav1.UpdateOptions) (*pipelinev1.TaskRun, error)
	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
	Get(ctx context.Context, name string, opts metav1.GetOptions) (*pipelinev1.TaskRun, error)
	List(ctx context.Context, opts metav1.ListOptions) (*pipelinev1.TaskRunList, error)
	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *pipelinev1.TaskRun, err error)
	// TaskRunExpansion allows hand-written extension methods to be added
	// alongside the generated ones.
	TaskRunExpansion
}

// taskRuns implements TaskRunInterface
type taskRuns struct {
	// ClientWithList supplies the generic REST CRUD/list/watch implementation.
	*gentype.ClientWithList[*pipelinev1.TaskRun, *pipelinev1.TaskRunList]
}

// newTaskRuns returns a TaskRuns
func newTaskRuns(c *TektonV1Client, namespace string) *taskRuns {
	return &taskRuns{
		gentype.NewClientWithList[*pipelinev1.TaskRun, *pipelinev1.TaskRunList](
			"taskruns",
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace,
			func() *pipelinev1.TaskRun { return &pipelinev1.TaskRun{} },
			func() *pipelinev1.TaskRunList { return &pipelinev1.TaskRunList{} },
		),
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeTektonV1alpha1 implements TektonV1alpha1Interface by recording actions
// on the embedded testing.Fake instead of contacting an API server.
type FakeTektonV1alpha1 struct {
	*testing.Fake
}

// Runs returns a fake client for Run resources in the given namespace.
func (c *FakeTektonV1alpha1) Runs(namespace string) v1alpha1.RunInterface {
	return newFakeRuns(c, namespace)
}

// StepActions returns a fake client for StepAction resources in the given namespace.
func (c *FakeTektonV1alpha1) StepActions(namespace string) v1alpha1.StepActionInterface {
	return newFakeStepActions(c, namespace)
}

// VerificationPolicies returns a fake client for VerificationPolicy resources in the given namespace.
func (c *FakeTektonV1alpha1) VerificationPolicies(namespace string) v1alpha1.VerificationPolicyInterface {
	return newFakeVerificationPolicies(c, namespace)
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeTektonV1alpha1) RESTClient() rest.Interface {
	// Fakes have no transport; callers receive a typed nil RESTClient.
	var ret *rest.RESTClient
	return ret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
gentype "k8s.io/client-go/gentype"
)
// fakeRuns implements RunInterface
type fakeRuns struct {
	// FakeClientWithList supplies the generic CRUD/list/watch fake behavior.
	*gentype.FakeClientWithList[*v1alpha1.Run, *v1alpha1.RunList]
	// Fake records every action performed against this client.
	Fake *FakeTektonV1alpha1
}

// newFakeRuns returns a fake namespaced Run client wired to the shared
// action recorder of the parent FakeTektonV1alpha1.
func newFakeRuns(fake *FakeTektonV1alpha1, namespace string) pipelinev1alpha1.RunInterface {
	return &fakeRuns{
		gentype.NewFakeClientWithList[*v1alpha1.Run, *v1alpha1.RunList](
			fake.Fake,
			namespace,
			v1alpha1.SchemeGroupVersion.WithResource("runs"),
			v1alpha1.SchemeGroupVersion.WithKind("Run"),
			func() *v1alpha1.Run { return &v1alpha1.Run{} },
			func() *v1alpha1.RunList { return &v1alpha1.RunList{} },
			// Copy list metadata from one list to another.
			func(dst, src *v1alpha1.RunList) { dst.ListMeta = src.ListMeta },
			// Convert list items to and from pointer slices for the generic fake.
			func(list *v1alpha1.RunList) []*v1alpha1.Run { return gentype.ToPointerSlice(list.Items) },
			func(list *v1alpha1.RunList, items []*v1alpha1.Run) { list.Items = gentype.FromPointerSlice(items) },
		),
		fake,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
gentype "k8s.io/client-go/gentype"
)
// fakeStepActions implements StepActionInterface
type fakeStepActions struct {
	// FakeClientWithList supplies the generic CRUD/list/watch fake behavior.
	*gentype.FakeClientWithList[*v1alpha1.StepAction, *v1alpha1.StepActionList]
	// Fake records every action performed against this client.
	Fake *FakeTektonV1alpha1
}

// newFakeStepActions returns a fake namespaced StepAction client wired to
// the shared action recorder of the parent FakeTektonV1alpha1.
func newFakeStepActions(fake *FakeTektonV1alpha1, namespace string) pipelinev1alpha1.StepActionInterface {
	return &fakeStepActions{
		gentype.NewFakeClientWithList[*v1alpha1.StepAction, *v1alpha1.StepActionList](
			fake.Fake,
			namespace,
			v1alpha1.SchemeGroupVersion.WithResource("stepactions"),
			v1alpha1.SchemeGroupVersion.WithKind("StepAction"),
			func() *v1alpha1.StepAction { return &v1alpha1.StepAction{} },
			func() *v1alpha1.StepActionList { return &v1alpha1.StepActionList{} },
			// Copy list metadata from one list to another.
			func(dst, src *v1alpha1.StepActionList) { dst.ListMeta = src.ListMeta },
			// Convert list items to and from pointer slices for the generic fake.
			func(list *v1alpha1.StepActionList) []*v1alpha1.StepAction { return gentype.ToPointerSlice(list.Items) },
			func(list *v1alpha1.StepActionList, items []*v1alpha1.StepAction) {
				list.Items = gentype.FromPointerSlice(items)
			},
		),
		fake,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
gentype "k8s.io/client-go/gentype"
)
// fakeVerificationPolicies implements VerificationPolicyInterface
// Client-gen fake for VerificationPolicy resources; behavior is delegated
// to the embedded generic fake backed by the shared testing.Fake.
type fakeVerificationPolicies struct {
*gentype.FakeClientWithList[*v1alpha1.VerificationPolicy, *v1alpha1.VerificationPolicyList]
Fake *FakeTektonV1alpha1
}
// newFakeVerificationPolicies returns a fake VerificationPolicyInterface
// scoped to the given namespace and wired to fake.Fake.
func newFakeVerificationPolicies(fake *FakeTektonV1alpha1, namespace string) pipelinev1alpha1.VerificationPolicyInterface {
return &fakeVerificationPolicies{
gentype.NewFakeClientWithList[*v1alpha1.VerificationPolicy, *v1alpha1.VerificationPolicyList](
fake.Fake,
namespace,
// Resource/Kind identifiers used when recording actions.
v1alpha1.SchemeGroupVersion.WithResource("verificationpolicies"),
v1alpha1.SchemeGroupVersion.WithKind("VerificationPolicy"),
// Factories for empty objects, list-meta copier, and item accessors.
func() *v1alpha1.VerificationPolicy { return &v1alpha1.VerificationPolicy{} },
func() *v1alpha1.VerificationPolicyList { return &v1alpha1.VerificationPolicyList{} },
func(dst, src *v1alpha1.VerificationPolicyList) { dst.ListMeta = src.ListMeta },
func(list *v1alpha1.VerificationPolicyList) []*v1alpha1.VerificationPolicy {
return gentype.ToPointerSlice(list.Items)
},
func(list *v1alpha1.VerificationPolicyList, items []*v1alpha1.VerificationPolicy) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
http "net/http"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// TektonV1alpha1Interface exposes the typed clients for every v1alpha1
// resource in the tekton.dev group plus access to the underlying REST client.
type TektonV1alpha1Interface interface {
RESTClient() rest.Interface
RunsGetter
StepActionsGetter
VerificationPoliciesGetter
}
// TektonV1alpha1Client is used to interact with features provided by the tekton.dev group.
type TektonV1alpha1Client struct {
// restClient performs the actual HTTP requests; see setConfigDefaults
// for how it is configured.
restClient rest.Interface
}
// Runs returns a client for Run resources in the given namespace.
func (c *TektonV1alpha1Client) Runs(namespace string) RunInterface {
return newRuns(c, namespace)
}
// StepActions returns a client for StepAction resources in the given namespace.
func (c *TektonV1alpha1Client) StepActions(namespace string) StepActionInterface {
return newStepActions(c, namespace)
}
// VerificationPolicies returns a client for VerificationPolicy resources in the given namespace.
func (c *TektonV1alpha1Client) VerificationPolicies(namespace string) VerificationPolicyInterface {
return newVerificationPolicies(c, namespace)
}
// NewForConfig creates a new TektonV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
// NewForConfig creates a new TektonV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*TektonV1alpha1Client, error) {
	// Operate on a copy so the caller's config is never mutated.
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}
	hc, err := rest.HTTPClientFor(&cfg)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&cfg, hc)
}
// NewForConfigAndClient creates a new TektonV1alpha1Client for the given
// config and http client.
// Note the http client provided takes precedence over the configured
// transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*TektonV1alpha1Client, error) {
	cfg := *c // defensive copy: the defaults applied below must not leak to the caller
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}
	rc, err := rest.RESTClientForConfigAndClient(&cfg, h)
	if err != nil {
		return nil, err
	}
	return &TektonV1alpha1Client{restClient: rc}, nil
}
// NewForConfigOrDie creates a new TektonV1alpha1Client for the given config
// and panics if there is an error in the config. Intended for callers that
// treat a bad config as a programmer error (e.g. wiring at startup).
func NewForConfigOrDie(c *rest.Config) *TektonV1alpha1Client {
	cl, err := NewForConfig(c)
	if err != nil {
		// Config construction failed; by contract this helper aborts.
		panic(err)
	}
	return cl
}
// New creates a new TektonV1alpha1Client for the given RESTClient.
// The caller is responsible for having configured c appropriately
// (group/version, codecs, API path).
func New(c rest.Interface) *TektonV1alpha1Client {
return &TektonV1alpha1Client{c}
}
// setConfigDefaults mutates config in place with the settings a v1alpha1
// tekton.dev REST client requires: group/version, the "/apis" path, the
// scheme-backed serializer, and a default user agent when none is set.
// It always returns nil; the error return keeps the generated call sites uniform.
func setConfigDefaults(config *rest.Config) error {
	groupVersion := pipelinev1alpha1.SchemeGroupVersion
	config.GroupVersion = &groupVersion
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	// Only fill in a user agent when the caller did not choose one.
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *TektonV1alpha1Client) RESTClient() rest.Interface {
// Tolerate a nil receiver so callers need not guard this accessor.
if c == nil {
return nil
}
return c.restClient
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// RunsGetter has a method to return a RunInterface.
// A group's client should implement this interface.
type RunsGetter interface {
Runs(namespace string) RunInterface
}
// RunInterface has methods to work with Run resources.
type RunInterface interface {
Create(ctx context.Context, run *pipelinev1alpha1.Run, opts v1.CreateOptions) (*pipelinev1alpha1.Run, error)
Update(ctx context.Context, run *pipelinev1alpha1.Run, opts v1.UpdateOptions) (*pipelinev1alpha1.Run, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
UpdateStatus(ctx context.Context, run *pipelinev1alpha1.Run, opts v1.UpdateOptions) (*pipelinev1alpha1.Run, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1alpha1.Run, error)
List(ctx context.Context, opts v1.ListOptions) (*pipelinev1alpha1.RunList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1alpha1.Run, err error)
RunExpansion
}
// runs implements RunInterface
type runs struct {
// All CRUD/list/watch behavior is provided by the embedded generic client.
*gentype.ClientWithList[*pipelinev1alpha1.Run, *pipelinev1alpha1.RunList]
}
// newRuns returns a Runs
// The client is scoped to the given namespace and issues requests for the
// "runs" resource through c's REST client.
func newRuns(c *TektonV1alpha1Client, namespace string) *runs {
return &runs{
gentype.NewClientWithList[*pipelinev1alpha1.Run, *pipelinev1alpha1.RunList](
"runs",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
// Factories for empty objects the generic client decodes responses into.
func() *pipelinev1alpha1.Run { return &pipelinev1alpha1.Run{} },
func() *pipelinev1alpha1.RunList { return &pipelinev1alpha1.RunList{} },
),
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// StepActionsGetter has a method to return a StepActionInterface.
// A group's client should implement this interface.
type StepActionsGetter interface {
StepActions(namespace string) StepActionInterface
}
// StepActionInterface has methods to work with StepAction resources.
type StepActionInterface interface {
Create(ctx context.Context, stepAction *pipelinev1alpha1.StepAction, opts v1.CreateOptions) (*pipelinev1alpha1.StepAction, error)
Update(ctx context.Context, stepAction *pipelinev1alpha1.StepAction, opts v1.UpdateOptions) (*pipelinev1alpha1.StepAction, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1alpha1.StepAction, error)
List(ctx context.Context, opts v1.ListOptions) (*pipelinev1alpha1.StepActionList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1alpha1.StepAction, err error)
StepActionExpansion
}
// stepActions implements StepActionInterface
type stepActions struct {
// All CRUD/list/watch behavior is provided by the embedded generic client.
*gentype.ClientWithList[*pipelinev1alpha1.StepAction, *pipelinev1alpha1.StepActionList]
}
// newStepActions returns a StepActions
// The client is scoped to the given namespace and issues requests for the
// "stepactions" resource through c's REST client.
func newStepActions(c *TektonV1alpha1Client, namespace string) *stepActions {
return &stepActions{
gentype.NewClientWithList[*pipelinev1alpha1.StepAction, *pipelinev1alpha1.StepActionList](
"stepactions",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
// Factories for empty objects the generic client decodes responses into.
func() *pipelinev1alpha1.StepAction { return &pipelinev1alpha1.StepAction{} },
func() *pipelinev1alpha1.StepActionList { return &pipelinev1alpha1.StepActionList{} },
),
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// VerificationPoliciesGetter has a method to return a VerificationPolicyInterface.
// A group's client should implement this interface.
type VerificationPoliciesGetter interface {
VerificationPolicies(namespace string) VerificationPolicyInterface
}
// VerificationPolicyInterface has methods to work with VerificationPolicy resources.
type VerificationPolicyInterface interface {
Create(ctx context.Context, verificationPolicy *pipelinev1alpha1.VerificationPolicy, opts v1.CreateOptions) (*pipelinev1alpha1.VerificationPolicy, error)
Update(ctx context.Context, verificationPolicy *pipelinev1alpha1.VerificationPolicy, opts v1.UpdateOptions) (*pipelinev1alpha1.VerificationPolicy, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1alpha1.VerificationPolicy, error)
List(ctx context.Context, opts v1.ListOptions) (*pipelinev1alpha1.VerificationPolicyList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1alpha1.VerificationPolicy, err error)
VerificationPolicyExpansion
}
// verificationPolicies implements VerificationPolicyInterface
type verificationPolicies struct {
// All CRUD/list/watch behavior is provided by the embedded generic client.
*gentype.ClientWithList[*pipelinev1alpha1.VerificationPolicy, *pipelinev1alpha1.VerificationPolicyList]
}
// newVerificationPolicies returns a VerificationPolicies
// The client is scoped to the given namespace and issues requests for the
// "verificationpolicies" resource through c's REST client.
func newVerificationPolicies(c *TektonV1alpha1Client, namespace string) *verificationPolicies {
return &verificationPolicies{
gentype.NewClientWithList[*pipelinev1alpha1.VerificationPolicy, *pipelinev1alpha1.VerificationPolicyList](
"verificationpolicies",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
// Factories for empty objects the generic client decodes responses into.
func() *pipelinev1alpha1.VerificationPolicy { return &pipelinev1alpha1.VerificationPolicy{} },
func() *pipelinev1alpha1.VerificationPolicyList { return &pipelinev1alpha1.VerificationPolicyList{} },
),
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CustomRunsGetter has a method to return a CustomRunInterface.
// A group's client should implement this interface.
type CustomRunsGetter interface {
CustomRuns(namespace string) CustomRunInterface
}
// CustomRunInterface has methods to work with CustomRun resources.
type CustomRunInterface interface {
Create(ctx context.Context, customRun *pipelinev1beta1.CustomRun, opts v1.CreateOptions) (*pipelinev1beta1.CustomRun, error)
Update(ctx context.Context, customRun *pipelinev1beta1.CustomRun, opts v1.UpdateOptions) (*pipelinev1beta1.CustomRun, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
UpdateStatus(ctx context.Context, customRun *pipelinev1beta1.CustomRun, opts v1.UpdateOptions) (*pipelinev1beta1.CustomRun, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1beta1.CustomRun, error)
List(ctx context.Context, opts v1.ListOptions) (*pipelinev1beta1.CustomRunList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1beta1.CustomRun, err error)
CustomRunExpansion
}
// customRuns implements CustomRunInterface
type customRuns struct {
// All CRUD/list/watch behavior is provided by the embedded generic client.
*gentype.ClientWithList[*pipelinev1beta1.CustomRun, *pipelinev1beta1.CustomRunList]
}
// newCustomRuns returns a CustomRuns
// The client is scoped to the given namespace and issues requests for the
// "customruns" resource through c's REST client.
func newCustomRuns(c *TektonV1beta1Client, namespace string) *customRuns {
return &customRuns{
gentype.NewClientWithList[*pipelinev1beta1.CustomRun, *pipelinev1beta1.CustomRunList](
"customruns",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
// Factories for empty objects the generic client decodes responses into.
func() *pipelinev1beta1.CustomRun { return &pipelinev1beta1.CustomRun{} },
func() *pipelinev1beta1.CustomRunList { return &pipelinev1beta1.CustomRunList{} },
),
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
gentype "k8s.io/client-go/gentype"
)
// fakeCustomRuns implements CustomRunInterface
// Client-gen fake for CustomRun resources; behavior is delegated to the
// embedded generic fake backed by the shared testing.Fake.
type fakeCustomRuns struct {
*gentype.FakeClientWithList[*v1beta1.CustomRun, *v1beta1.CustomRunList]
Fake *FakeTektonV1beta1
}
// newFakeCustomRuns returns a fake CustomRunInterface scoped to the given
// namespace and wired to fake.Fake.
func newFakeCustomRuns(fake *FakeTektonV1beta1, namespace string) pipelinev1beta1.CustomRunInterface {
return &fakeCustomRuns{
gentype.NewFakeClientWithList[*v1beta1.CustomRun, *v1beta1.CustomRunList](
fake.Fake,
namespace,
// Resource/Kind identifiers used when recording actions.
v1beta1.SchemeGroupVersion.WithResource("customruns"),
v1beta1.SchemeGroupVersion.WithKind("CustomRun"),
// Factories for empty objects, list-meta copier, and item accessors.
func() *v1beta1.CustomRun { return &v1beta1.CustomRun{} },
func() *v1beta1.CustomRunList { return &v1beta1.CustomRunList{} },
func(dst, src *v1beta1.CustomRunList) { dst.ListMeta = src.ListMeta },
func(list *v1beta1.CustomRunList) []*v1beta1.CustomRun { return gentype.ToPointerSlice(list.Items) },
func(list *v1beta1.CustomRunList, items []*v1beta1.CustomRun) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
gentype "k8s.io/client-go/gentype"
)
// fakePipelines implements PipelineInterface
// Client-gen fake for Pipeline resources; behavior is delegated to the
// embedded generic fake backed by the shared testing.Fake.
type fakePipelines struct {
*gentype.FakeClientWithList[*v1beta1.Pipeline, *v1beta1.PipelineList]
Fake *FakeTektonV1beta1
}
// newFakePipelines returns a fake PipelineInterface scoped to the given
// namespace and wired to fake.Fake.
func newFakePipelines(fake *FakeTektonV1beta1, namespace string) pipelinev1beta1.PipelineInterface {
return &fakePipelines{
gentype.NewFakeClientWithList[*v1beta1.Pipeline, *v1beta1.PipelineList](
fake.Fake,
namespace,
// Resource/Kind identifiers used when recording actions.
v1beta1.SchemeGroupVersion.WithResource("pipelines"),
v1beta1.SchemeGroupVersion.WithKind("Pipeline"),
// Factories for empty objects, list-meta copier, and item accessors.
func() *v1beta1.Pipeline { return &v1beta1.Pipeline{} },
func() *v1beta1.PipelineList { return &v1beta1.PipelineList{} },
func(dst, src *v1beta1.PipelineList) { dst.ListMeta = src.ListMeta },
func(list *v1beta1.PipelineList) []*v1beta1.Pipeline { return gentype.ToPointerSlice(list.Items) },
func(list *v1beta1.PipelineList, items []*v1beta1.Pipeline) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeTektonV1beta1 is a fake implementation of TektonV1beta1Interface.
// The embedded testing.Fake records every action and backs all the
// per-resource fake clients returned by the accessors below.
type FakeTektonV1beta1 struct {
*testing.Fake
}
// CustomRuns returns a fake client for CustomRun resources in the given namespace.
func (c *FakeTektonV1beta1) CustomRuns(namespace string) v1beta1.CustomRunInterface {
return newFakeCustomRuns(c, namespace)
}
// Pipelines returns a fake client for Pipeline resources in the given namespace.
func (c *FakeTektonV1beta1) Pipelines(namespace string) v1beta1.PipelineInterface {
return newFakePipelines(c, namespace)
}
// PipelineRuns returns a fake client for PipelineRun resources in the given namespace.
func (c *FakeTektonV1beta1) PipelineRuns(namespace string) v1beta1.PipelineRunInterface {
return newFakePipelineRuns(c, namespace)
}
// StepActions returns a fake client for StepAction resources in the given namespace.
func (c *FakeTektonV1beta1) StepActions(namespace string) v1beta1.StepActionInterface {
return newFakeStepActions(c, namespace)
}
// Tasks returns a fake client for Task resources in the given namespace.
func (c *FakeTektonV1beta1) Tasks(namespace string) v1beta1.TaskInterface {
return newFakeTasks(c, namespace)
}
// TaskRuns returns a fake client for TaskRun resources in the given namespace.
func (c *FakeTektonV1beta1) TaskRuns(namespace string) v1beta1.TaskRunInterface {
return newFakeTaskRuns(c, namespace)
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeTektonV1beta1) RESTClient() rest.Interface {
// Fakes never contact a server; a typed-nil *rest.RESTClient is returned
// (note: the resulting interface value is non-nil but unusable by design).
var ret *rest.RESTClient
return ret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
gentype "k8s.io/client-go/gentype"
)
// fakePipelineRuns implements PipelineRunInterface
// Client-gen fake for PipelineRun resources; behavior is delegated to the
// embedded generic fake backed by the shared testing.Fake.
type fakePipelineRuns struct {
*gentype.FakeClientWithList[*v1beta1.PipelineRun, *v1beta1.PipelineRunList]
Fake *FakeTektonV1beta1
}
// newFakePipelineRuns returns a fake PipelineRunInterface scoped to the given
// namespace and wired to fake.Fake.
func newFakePipelineRuns(fake *FakeTektonV1beta1, namespace string) pipelinev1beta1.PipelineRunInterface {
return &fakePipelineRuns{
gentype.NewFakeClientWithList[*v1beta1.PipelineRun, *v1beta1.PipelineRunList](
fake.Fake,
namespace,
// Resource/Kind identifiers used when recording actions.
v1beta1.SchemeGroupVersion.WithResource("pipelineruns"),
v1beta1.SchemeGroupVersion.WithKind("PipelineRun"),
// Factories for empty objects, list-meta copier, and item accessors.
func() *v1beta1.PipelineRun { return &v1beta1.PipelineRun{} },
func() *v1beta1.PipelineRunList { return &v1beta1.PipelineRunList{} },
func(dst, src *v1beta1.PipelineRunList) { dst.ListMeta = src.ListMeta },
func(list *v1beta1.PipelineRunList) []*v1beta1.PipelineRun { return gentype.ToPointerSlice(list.Items) },
func(list *v1beta1.PipelineRunList, items []*v1beta1.PipelineRun) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
gentype "k8s.io/client-go/gentype"
)
// fakeStepActions implements StepActionInterface
// Client-gen fake for v1beta1 StepAction resources; behavior is delegated
// to the embedded generic fake backed by the shared testing.Fake.
type fakeStepActions struct {
*gentype.FakeClientWithList[*v1beta1.StepAction, *v1beta1.StepActionList]
Fake *FakeTektonV1beta1
}
// newFakeStepActions returns a fake StepActionInterface scoped to the given
// namespace and wired to fake.Fake.
func newFakeStepActions(fake *FakeTektonV1beta1, namespace string) pipelinev1beta1.StepActionInterface {
return &fakeStepActions{
gentype.NewFakeClientWithList[*v1beta1.StepAction, *v1beta1.StepActionList](
fake.Fake,
namespace,
// Resource/Kind identifiers used when recording actions.
v1beta1.SchemeGroupVersion.WithResource("stepactions"),
v1beta1.SchemeGroupVersion.WithKind("StepAction"),
// Factories for empty objects, list-meta copier, and item accessors.
func() *v1beta1.StepAction { return &v1beta1.StepAction{} },
func() *v1beta1.StepActionList { return &v1beta1.StepActionList{} },
func(dst, src *v1beta1.StepActionList) { dst.ListMeta = src.ListMeta },
func(list *v1beta1.StepActionList) []*v1beta1.StepAction { return gentype.ToPointerSlice(list.Items) },
func(list *v1beta1.StepActionList, items []*v1beta1.StepAction) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
gentype "k8s.io/client-go/gentype"
)
// fakeTasks implements TaskInterface
// Client-gen fake for Task resources; behavior is delegated to the
// embedded generic fake backed by the shared testing.Fake.
type fakeTasks struct {
*gentype.FakeClientWithList[*v1beta1.Task, *v1beta1.TaskList]
Fake *FakeTektonV1beta1
}
// newFakeTasks returns a fake TaskInterface scoped to the given namespace
// and wired to fake.Fake.
func newFakeTasks(fake *FakeTektonV1beta1, namespace string) pipelinev1beta1.TaskInterface {
return &fakeTasks{
gentype.NewFakeClientWithList[*v1beta1.Task, *v1beta1.TaskList](
fake.Fake,
namespace,
// Resource/Kind identifiers used when recording actions.
v1beta1.SchemeGroupVersion.WithResource("tasks"),
v1beta1.SchemeGroupVersion.WithKind("Task"),
// Factories for empty objects, list-meta copier, and item accessors.
func() *v1beta1.Task { return &v1beta1.Task{} },
func() *v1beta1.TaskList { return &v1beta1.TaskList{} },
func(dst, src *v1beta1.TaskList) { dst.ListMeta = src.ListMeta },
func(list *v1beta1.TaskList) []*v1beta1.Task { return gentype.ToPointerSlice(list.Items) },
func(list *v1beta1.TaskList, items []*v1beta1.Task) { list.Items = gentype.FromPointerSlice(items) },
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
gentype "k8s.io/client-go/gentype"
)
// fakeTaskRuns implements TaskRunInterface
// Client-gen fake for TaskRun resources; behavior is delegated to the
// embedded generic fake backed by the shared testing.Fake.
type fakeTaskRuns struct {
*gentype.FakeClientWithList[*v1beta1.TaskRun, *v1beta1.TaskRunList]
Fake *FakeTektonV1beta1
}
// newFakeTaskRuns returns a fake TaskRunInterface scoped to the given
// namespace and wired to fake.Fake.
func newFakeTaskRuns(fake *FakeTektonV1beta1, namespace string) pipelinev1beta1.TaskRunInterface {
return &fakeTaskRuns{
gentype.NewFakeClientWithList[*v1beta1.TaskRun, *v1beta1.TaskRunList](
fake.Fake,
namespace,
// Resource/Kind identifiers used when recording actions.
v1beta1.SchemeGroupVersion.WithResource("taskruns"),
v1beta1.SchemeGroupVersion.WithKind("TaskRun"),
// Factories for empty objects, list-meta copier, and item accessors.
func() *v1beta1.TaskRun { return &v1beta1.TaskRun{} },
func() *v1beta1.TaskRunList { return &v1beta1.TaskRunList{} },
func(dst, src *v1beta1.TaskRunList) { dst.ListMeta = src.ListMeta },
func(list *v1beta1.TaskRunList) []*v1beta1.TaskRun { return gentype.ToPointerSlice(list.Items) },
func(list *v1beta1.TaskRunList, items []*v1beta1.TaskRun) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// PipelinesGetter has a method to return a PipelineInterface.
// A group's client should implement this interface.
type PipelinesGetter interface {
Pipelines(namespace string) PipelineInterface
}
// PipelineInterface has methods to work with Pipeline resources.
type PipelineInterface interface {
Create(ctx context.Context, pipeline *pipelinev1beta1.Pipeline, opts v1.CreateOptions) (*pipelinev1beta1.Pipeline, error)
Update(ctx context.Context, pipeline *pipelinev1beta1.Pipeline, opts v1.UpdateOptions) (*pipelinev1beta1.Pipeline, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1beta1.Pipeline, error)
List(ctx context.Context, opts v1.ListOptions) (*pipelinev1beta1.PipelineList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1beta1.Pipeline, err error)
PipelineExpansion
}
// pipelines implements PipelineInterface by embedding the generic
// list-capable client from k8s.io/client-go/gentype, specialized to
// the Pipeline types.
type pipelines struct {
	*gentype.ClientWithList[*pipelinev1beta1.Pipeline, *pipelinev1beta1.PipelineList]
}
// newPipelines constructs a client for Pipeline resources scoped to the
// given namespace, backed by the parent client's REST client.
func newPipelines(c *TektonV1beta1Client, namespace string) *pipelines {
	client := gentype.NewClientWithList[*pipelinev1beta1.Pipeline, *pipelinev1beta1.PipelineList](
		"pipelines",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *pipelinev1beta1.Pipeline { return &pipelinev1beta1.Pipeline{} },
		func() *pipelinev1beta1.PipelineList { return &pipelinev1beta1.PipelineList{} },
	)
	return &pipelines{client}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
http "net/http"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// TektonV1beta1Interface aggregates the per-resource getters of the
// tekton.dev/v1beta1 API group, plus access to the underlying REST client.
type TektonV1beta1Interface interface {
	RESTClient() rest.Interface
	CustomRunsGetter
	PipelinesGetter
	PipelineRunsGetter
	StepActionsGetter
	TasksGetter
	TaskRunsGetter
}
// TektonV1beta1Client is used to interact with features provided by the tekton.dev group.
type TektonV1beta1Client struct {
	restClient rest.Interface
}

// CustomRuns returns a namespaced client for CustomRun resources.
func (c *TektonV1beta1Client) CustomRuns(namespace string) CustomRunInterface {
	return newCustomRuns(c, namespace)
}

// Pipelines returns a namespaced client for Pipeline resources.
func (c *TektonV1beta1Client) Pipelines(namespace string) PipelineInterface {
	return newPipelines(c, namespace)
}

// PipelineRuns returns a namespaced client for PipelineRun resources.
func (c *TektonV1beta1Client) PipelineRuns(namespace string) PipelineRunInterface {
	return newPipelineRuns(c, namespace)
}

// StepActions returns a namespaced client for StepAction resources.
func (c *TektonV1beta1Client) StepActions(namespace string) StepActionInterface {
	return newStepActions(c, namespace)
}

// Tasks returns a namespaced client for Task resources.
func (c *TektonV1beta1Client) Tasks(namespace string) TaskInterface {
	return newTasks(c, namespace)
}

// TaskRuns returns a namespaced client for TaskRun resources.
func (c *TektonV1beta1Client) TaskRuns(namespace string) TaskRunInterface {
	return newTaskRuns(c, namespace)
}
// NewForConfig creates a new TektonV1beta1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*TektonV1beta1Client, error) {
	// Work on a copy so the caller's config is never mutated.
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}
	httpClient, err := rest.HTTPClientFor(&cfg)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&cfg, httpClient)
}
// NewForConfigAndClient creates a new TektonV1beta1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*TektonV1beta1Client, error) {
	// Defaults are applied to a copy; the caller's config stays untouched.
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}
	restClient, err := rest.RESTClientForConfigAndClient(&cfg, h)
	if err != nil {
		return nil, err
	}
	return &TektonV1beta1Client{restClient: restClient}, nil
}
// NewForConfigOrDie creates a new TektonV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *TektonV1beta1Client {
	cl, err := NewForConfig(c)
	if err != nil {
		// Must-style constructor: configuration errors are programmer errors here.
		panic(err)
	}
	return cl
}
// New creates a new TektonV1beta1Client for the given RESTClient.
func New(c rest.Interface) *TektonV1beta1Client {
	return &TektonV1beta1Client{restClient: c}
}
// setConfigDefaults fills in the group/version, API path, serializer and
// user agent a tekton.dev/v1beta1 REST client needs.
func setConfigDefaults(config *rest.Config) error {
	// Copy the scheme's GroupVersion so the config does not alias the package variable.
	groupVersion := pipelinev1beta1.SchemeGroupVersion
	config.GroupVersion = &groupVersion
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *TektonV1beta1Client) RESTClient() rest.Interface {
	// Tolerate a nil receiver so callers holding a nil client get a nil interface back.
	if c != nil {
		return c.restClient
	}
	return nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// PipelineRunsGetter has a method to return a PipelineRunInterface.
// A group's client should implement this interface.
type PipelineRunsGetter interface {
	// PipelineRuns returns a client scoped to the given namespace for PipelineRun resources.
	PipelineRuns(namespace string) PipelineRunInterface
}
// PipelineRunInterface has methods to work with PipelineRun resources.
type PipelineRunInterface interface {
	// Create persists a new PipelineRun and returns the server's representation of it.
	Create(ctx context.Context, pipelineRun *pipelinev1beta1.PipelineRun, opts v1.CreateOptions) (*pipelinev1beta1.PipelineRun, error)
	// Update replaces an existing PipelineRun and returns the updated object.
	Update(ctx context.Context, pipelineRun *pipelinev1beta1.PipelineRun, opts v1.UpdateOptions) (*pipelinev1beta1.PipelineRun, error)
	// UpdateStatus updates only the status subresource of a PipelineRun.
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, pipelineRun *pipelinev1beta1.PipelineRun, opts v1.UpdateOptions) (*pipelinev1beta1.PipelineRun, error)
	// Delete removes the named PipelineRun.
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	// DeleteCollection removes all PipelineRuns matching listOpts.
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	// Get fetches the named PipelineRun.
	Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1beta1.PipelineRun, error)
	// List returns the PipelineRuns matching opts.
	List(ctx context.Context, opts v1.ListOptions) (*pipelinev1beta1.PipelineRunList, error)
	// Watch streams change events for PipelineRuns matching opts.
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	// Patch applies data (of patch type pt) to the named PipelineRun, optionally to subresources.
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1beta1.PipelineRun, err error)
	PipelineRunExpansion
}
// pipelineRuns implements PipelineRunInterface by embedding the generic
// list-capable client from k8s.io/client-go/gentype, specialized to
// the PipelineRun types.
type pipelineRuns struct {
	*gentype.ClientWithList[*pipelinev1beta1.PipelineRun, *pipelinev1beta1.PipelineRunList]
}
// newPipelineRuns constructs a client for PipelineRun resources scoped to
// the given namespace, backed by the parent client's REST client.
func newPipelineRuns(c *TektonV1beta1Client, namespace string) *pipelineRuns {
	client := gentype.NewClientWithList[*pipelinev1beta1.PipelineRun, *pipelinev1beta1.PipelineRunList](
		"pipelineruns",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *pipelinev1beta1.PipelineRun { return &pipelinev1beta1.PipelineRun{} },
		func() *pipelinev1beta1.PipelineRunList { return &pipelinev1beta1.PipelineRunList{} },
	)
	return &pipelineRuns{client}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// StepActionsGetter has a method to return a StepActionInterface.
// A group's client should implement this interface.
type StepActionsGetter interface {
	// StepActions returns a client scoped to the given namespace for StepAction resources.
	StepActions(namespace string) StepActionInterface
}
// StepActionInterface has methods to work with StepAction resources.
type StepActionInterface interface {
	// Create persists a new StepAction and returns the server's representation of it.
	Create(ctx context.Context, stepAction *pipelinev1beta1.StepAction, opts v1.CreateOptions) (*pipelinev1beta1.StepAction, error)
	// Update replaces an existing StepAction and returns the updated object.
	Update(ctx context.Context, stepAction *pipelinev1beta1.StepAction, opts v1.UpdateOptions) (*pipelinev1beta1.StepAction, error)
	// Delete removes the named StepAction.
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	// DeleteCollection removes all StepActions matching listOpts.
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	// Get fetches the named StepAction.
	Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1beta1.StepAction, error)
	// List returns the StepActions matching opts.
	List(ctx context.Context, opts v1.ListOptions) (*pipelinev1beta1.StepActionList, error)
	// Watch streams change events for StepActions matching opts.
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	// Patch applies data (of patch type pt) to the named StepAction, optionally to subresources.
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1beta1.StepAction, err error)
	StepActionExpansion
}
// stepActions implements StepActionInterface by embedding the generic
// list-capable client from k8s.io/client-go/gentype, specialized to
// the StepAction types.
type stepActions struct {
	*gentype.ClientWithList[*pipelinev1beta1.StepAction, *pipelinev1beta1.StepActionList]
}
// newStepActions constructs a client for StepAction resources scoped to
// the given namespace, backed by the parent client's REST client.
func newStepActions(c *TektonV1beta1Client, namespace string) *stepActions {
	client := gentype.NewClientWithList[*pipelinev1beta1.StepAction, *pipelinev1beta1.StepActionList](
		"stepactions",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *pipelinev1beta1.StepAction { return &pipelinev1beta1.StepAction{} },
		func() *pipelinev1beta1.StepActionList { return &pipelinev1beta1.StepActionList{} },
	)
	return &stepActions{client}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// TasksGetter has a method to return a TaskInterface.
// A group's client should implement this interface.
type TasksGetter interface {
	// Tasks returns a client scoped to the given namespace for Task resources.
	Tasks(namespace string) TaskInterface
}
// TaskInterface has methods to work with Task resources.
type TaskInterface interface {
	// Create persists a new Task and returns the server's representation of it.
	Create(ctx context.Context, task *pipelinev1beta1.Task, opts v1.CreateOptions) (*pipelinev1beta1.Task, error)
	// Update replaces an existing Task and returns the updated object.
	Update(ctx context.Context, task *pipelinev1beta1.Task, opts v1.UpdateOptions) (*pipelinev1beta1.Task, error)
	// Delete removes the named Task.
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	// DeleteCollection removes all Tasks matching listOpts.
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	// Get fetches the named Task.
	Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1beta1.Task, error)
	// List returns the Tasks matching opts.
	List(ctx context.Context, opts v1.ListOptions) (*pipelinev1beta1.TaskList, error)
	// Watch streams change events for Tasks matching opts.
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	// Patch applies data (of patch type pt) to the named Task, optionally to subresources.
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1beta1.Task, err error)
	TaskExpansion
}
// tasks implements TaskInterface by embedding the generic list-capable
// client from k8s.io/client-go/gentype, specialized to the Task types.
type tasks struct {
	*gentype.ClientWithList[*pipelinev1beta1.Task, *pipelinev1beta1.TaskList]
}
// newTasks constructs a client for Task resources scoped to the given
// namespace, backed by the parent client's REST client.
func newTasks(c *TektonV1beta1Client, namespace string) *tasks {
	client := gentype.NewClientWithList[*pipelinev1beta1.Task, *pipelinev1beta1.TaskList](
		"tasks",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *pipelinev1beta1.Task { return &pipelinev1beta1.Task{} },
		func() *pipelinev1beta1.TaskList { return &pipelinev1beta1.TaskList{} },
	)
	return &tasks{client}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// TaskRunsGetter has a method to return a TaskRunInterface.
// A group's client should implement this interface.
type TaskRunsGetter interface {
	// TaskRuns returns a client scoped to the given namespace for TaskRun resources.
	TaskRuns(namespace string) TaskRunInterface
}
// TaskRunInterface has methods to work with TaskRun resources.
type TaskRunInterface interface {
	// Create persists a new TaskRun and returns the server's representation of it.
	Create(ctx context.Context, taskRun *pipelinev1beta1.TaskRun, opts v1.CreateOptions) (*pipelinev1beta1.TaskRun, error)
	// Update replaces an existing TaskRun and returns the updated object.
	Update(ctx context.Context, taskRun *pipelinev1beta1.TaskRun, opts v1.UpdateOptions) (*pipelinev1beta1.TaskRun, error)
	// UpdateStatus updates only the status subresource of a TaskRun.
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, taskRun *pipelinev1beta1.TaskRun, opts v1.UpdateOptions) (*pipelinev1beta1.TaskRun, error)
	// Delete removes the named TaskRun.
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	// DeleteCollection removes all TaskRuns matching listOpts.
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	// Get fetches the named TaskRun.
	Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1beta1.TaskRun, error)
	// List returns the TaskRuns matching opts.
	List(ctx context.Context, opts v1.ListOptions) (*pipelinev1beta1.TaskRunList, error)
	// Watch streams change events for TaskRuns matching opts.
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	// Patch applies data (of patch type pt) to the named TaskRun, optionally to subresources.
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1beta1.TaskRun, err error)
	TaskRunExpansion
}
// taskRuns implements TaskRunInterface by embedding the generic
// list-capable client from k8s.io/client-go/gentype, specialized to
// the TaskRun types.
type taskRuns struct {
	*gentype.ClientWithList[*pipelinev1beta1.TaskRun, *pipelinev1beta1.TaskRunList]
}
// newTaskRuns constructs a client for TaskRun resources scoped to the
// given namespace, backed by the parent client's REST client.
func newTaskRuns(c *TektonV1beta1Client, namespace string) *taskRuns {
	client := gentype.NewClientWithList[*pipelinev1beta1.TaskRun, *pipelinev1beta1.TaskRunList](
		"taskruns",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *pipelinev1beta1.TaskRun { return &pipelinev1beta1.TaskRun{} },
		func() *pipelinev1beta1.TaskRunList { return &pipelinev1beta1.TaskRunList{} },
	)
	return &taskRuns{client}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipeline "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory

// sharedInformerFactory caches one SharedIndexInformer per resource type
// and hands the shared instance out on demand via InformerFor.
type sharedInformerFactory struct {
	client versioned.Interface
	// namespace restricts all informers; v1.NamespaceAll means unrestricted.
	namespace string
	// tweakListOptions, if set, is applied to every list/watch call.
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	// lock guards informers, startedInformers, customResync and shuttingDown.
	lock sync.Mutex
	// defaultResync is the resync period used when no per-type override exists.
	defaultResync time.Duration
	// customResync maps informer types to a resync period overriding defaultResync.
	customResync map[reflect.Type]time.Duration
	// transform is installed on each informer created by InformerFor.
	transform cache.TransformFunc
	// informers caches the shared informer created for each object type.
	informers map[reflect.Type]cache.SharedIndexInformer
	// startedInformers is used for tracking which informers have been started.
	// This allows Start() to be called multiple times safely.
	startedInformers map[reflect.Type]bool
	// wg tracks how many goroutines were started.
	wg sync.WaitGroup
	// shuttingDown is true when Shutdown has been called. It may still be running
	// because it needs to wait for goroutines.
	shuttingDown bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
	return func(factory *sharedInformerFactory) *sharedInformerFactory {
		// Key the override map by the dynamic type of each sample object.
		for obj, resync := range resyncConfig {
			factory.customResync[reflect.TypeOf(obj)] = resync
		}
		return factory
	}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.tweakListOptions = tweakListOptions
		return f
	}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.namespace = namespace
		return f
	}
}
// WithTransform sets a transform on all informers.
func WithTransform(transform cache.TransformFunc) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.transform = transform
		return f
	}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
// It is a convenience wrapper around NewSharedInformerFactoryWithOptions with no options.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
	return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
//
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
	return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
	f := &sharedInformerFactory{
		client:           client,
		namespace:        v1.NamespaceAll,
		defaultResync:    defaultResync,
		informers:        map[reflect.Type]cache.SharedIndexInformer{},
		startedInformers: map[reflect.Type]bool{},
		customResync:     map[reflect.Type]time.Duration{},
	}

	// Each option may replace the factory wholesale, so thread it through.
	for _, opt := range options {
		f = opt(f)
	}

	return f
}
// Start launches Run for every informer that has been requested but not yet
// started, each in its own goroutine tracked by f.wg. It is safe to call
// repeatedly; already-started informers are skipped, and it is a no-op once
// Shutdown has been called.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
	f.lock.Lock()
	defer f.lock.Unlock()

	// Once shutting down, no new goroutines may be started.
	if f.shuttingDown {
		return
	}

	for informerType, informer := range f.informers {
		if !f.startedInformers[informerType] {
			f.wg.Add(1)
			// We need a new variable in each loop iteration,
			// otherwise the goroutine would use the loop variable
			// and that keeps changing.
			informer := informer
			go func() {
				defer f.wg.Done()
				informer.Run(stopCh)
			}()
			f.startedInformers[informerType] = true
		}
	}
}
// Shutdown marks the factory as shutting down (so Start becomes a no-op)
// and then blocks until all informer goroutines have terminated.
func (f *sharedInformerFactory) Shutdown() {
	// The lock must be released before Wait: the goroutines being waited on
	// do not hold it, and holding it here would block concurrent Start calls.
	f.lock.Lock()
	f.shuttingDown = true
	f.lock.Unlock()

	// Will return immediately if there is nothing to wait for.
	f.wg.Wait()
}
// WaitForCacheSync blocks until every started informer's cache has synced or
// stopCh closes, and reports the sync result keyed by informer type.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
	// Snapshot the started informers under the lock, then wait without it so
	// waiting cannot block Start/Shutdown.
	f.lock.Lock()
	started := make(map[reflect.Type]cache.SharedIndexInformer, len(f.informers))
	for informerType, informer := range f.informers {
		if f.startedInformers[informerType] {
			started[informerType] = informer
		}
	}
	f.lock.Unlock()

	result := make(map[reflect.Type]bool, len(started))
	for informerType, informer := range started {
		result[informerType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
	}
	return result
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client. The informer is created on first request for a given object type
// and cached; later calls for the same type return the shared instance.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(obj)
	if existing, ok := f.informers[informerType]; ok {
		return existing
	}

	// Per-type resync override wins over the factory default.
	resyncPeriod := f.defaultResync
	if custom, ok := f.customResync[informerType]; ok {
		resyncPeriod = custom
	}

	created := newFunc(f.client, resyncPeriod)
	created.SetTransform(f.transform)
	f.informers[informerType] = created

	return created
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
//
// It is typically used like this:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	factory := NewSharedInformerFactory(client, resyncPeriod)
//	defer factory.Shutdown() // Returns immediately if nothing was started.
//	genericInformer := factory.ForResource(resource)
//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
//	factory.Start(ctx.Done()) // Start processing these informers.
//	synced := factory.WaitForCacheSync(ctx.Done())
//	for v, ok := range synced {
//	    if !ok {
//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
//	        return
//	    }
//	}
//
//	// Informers can also be created after Start, but then
//	// Start must be called again:
//	anotherGenericInformer := factory.ForResource(resource)
//	factory.Start(ctx.Done())
type SharedInformerFactory interface {
	internalinterfaces.SharedInformerFactory

	// Start initializes all requested informers. They are handled in goroutines
	// which run until the stop channel gets closed.
	// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
	Start(stopCh <-chan struct{})

	// Shutdown marks a factory as shutting down. At that point no new
	// informers can be started anymore and Start will return without
	// doing anything.
	//
	// In addition, Shutdown blocks until all goroutines have terminated. For that
	// to happen, the close channel(s) that they were started with must be closed,
	// either before Shutdown gets called or while it is waiting.
	//
	// Shutdown may be called multiple times, even concurrently. All such calls will
	// block until all goroutines have terminated.
	Shutdown()

	// WaitForCacheSync blocks until all started informers' caches were synced
	// or the stop channel gets closed.
	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool

	// ForResource gives generic access to a shared informer of the matching type.
	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)

	// InformerFor returns the SharedIndexInformer for obj using an internal
	// client.
	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer

	// Tekton returns the informer group for the tekton.dev API group.
	Tekton() pipeline.Interface
}
// Tekton returns the informer group for the tekton.dev API group, scoped to
// the factory's namespace and list-option filter.
func (f *sharedInformerFactory) Tekton() pipeline.Interface {
	return pipeline.New(f, f.namespace, f.tweakListOptions)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() cache.GenericLister
}

// genericInformer pairs a typed shared informer with its GroupResource so a
// generic (untyped) lister can be built over it.
type genericInformer struct {
	informer cache.SharedIndexInformer
	resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer { return f.informer }

// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
	return cache.NewGenericLister(f.informer.GetIndexer(), f.resource)
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
//
// The switch below is generated and exhaustive over the informers this
// factory knows; an unlisted GroupVersionResource yields an error.
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
	switch resource {
	// Group=tekton.dev, Version=v1
	case v1.SchemeGroupVersion.WithResource("pipelines"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().Pipelines().Informer()}, nil
	case v1.SchemeGroupVersion.WithResource("pipelineruns"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().PipelineRuns().Informer()}, nil
	case v1.SchemeGroupVersion.WithResource("tasks"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().Tasks().Informer()}, nil
	case v1.SchemeGroupVersion.WithResource("taskruns"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().TaskRuns().Informer()}, nil

	// Group=tekton.dev, Version=v1alpha1
	case v1alpha1.SchemeGroupVersion.WithResource("runs"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().Runs().Informer()}, nil
	case v1alpha1.SchemeGroupVersion.WithResource("stepactions"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().StepActions().Informer()}, nil
	case v1alpha1.SchemeGroupVersion.WithResource("verificationpolicies"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().VerificationPolicies().Informer()}, nil

	// Group=tekton.dev, Version=v1beta1
	case v1beta1.SchemeGroupVersion.WithResource("customruns"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().CustomRuns().Informer()}, nil
	case v1beta1.SchemeGroupVersion.WithResource("pipelines"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().Pipelines().Informer()}, nil
	case v1beta1.SchemeGroupVersion.WithResource("pipelineruns"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().PipelineRuns().Informer()}, nil
	case v1beta1.SchemeGroupVersion.WithResource("stepactions"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().StepActions().Informer()}, nil
	case v1beta1.SchemeGroupVersion.WithResource("tasks"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().Tasks().Informer()}, nil
	case v1beta1.SchemeGroupVersion.WithResource("taskruns"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().TaskRuns().Informer()}, nil
	}

	return nil, fmt.Errorf("no informer found for %v", resource)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package pipeline
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
	// V1 provides access to shared informers for resources in V1.
	V1() v1.Interface
	// V1alpha1 provides access to shared informers for resources in V1alpha1.
	V1alpha1() v1alpha1.Interface
	// V1beta1 provides access to shared informers for resources in V1beta1.
	V1beta1() v1beta1.Interface
}

// group implements Interface by carrying the factory plus the namespace and
// list-option filter that every per-version informer inherits.
type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	g := &group{
		factory:          f,
		namespace:        namespace,
		tweakListOptions: tweakListOptions,
	}
	return g
}
// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
	return v1.New(g.factory, g.namespace, g.tweakListOptions)
}

// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}

// V1beta1 returns a new v1beta1.Interface.
func (g *group) V1beta1() v1beta1.Interface {
	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
	// Pipelines returns a PipelineInformer.
	Pipelines() PipelineInformer
	// PipelineRuns returns a PipelineRunInformer.
	PipelineRuns() PipelineRunInformer
	// Tasks returns a TaskInformer.
	Tasks() TaskInformer
	// TaskRuns returns a TaskRunInformer.
	TaskRuns() TaskRunInformer
}

// version implements Interface by carrying the factory plus the namespace
// and list-option filter each resource informer inherits.
type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// Pipelines returns a PipelineInformer.
func (v *version) Pipelines() PipelineInformer {
return &pipelineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// PipelineRuns returns a PipelineRunInformer.
func (v *version) PipelineRuns() PipelineRunInformer {
return &pipelineRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// Tasks returns a TaskInformer.
func (v *version) Tasks() TaskInformer {
return &taskInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// TaskRuns returns a TaskRunInformer.
func (v *version) TaskRuns() TaskRunInformer {
return &taskRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
context "context"
time "time"
apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// PipelineInformer provides access to a shared informer and lister for
// Pipelines.
type PipelineInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1.PipelineLister
}
// pipelineInformer defers construction to the factory so the underlying
// SharedIndexInformer is created once and shared across callers.
type pipelineInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewPipelineInformer constructs a new informer for Pipeline type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPipelineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredPipelineInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredPipelineInformer constructs a new informer for Pipeline type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPipelineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().Pipelines(namespace).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().Pipelines(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1.Pipeline{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *pipelineInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredPipelineInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for Pipeline, creating it on
// first use via defaultInformer.
func (f *pipelineInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1.Pipeline{}, f.defaultInformer)
}
// Lister returns a PipelineLister backed by the shared informer's indexer.
func (f *pipelineInformer) Lister() pipelinev1.PipelineLister {
return pipelinev1.NewPipelineLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
context "context"
time "time"
apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// PipelineRunInformer provides access to a shared informer and lister for
// PipelineRuns.
type PipelineRunInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1.PipelineRunLister
}
// pipelineRunInformer defers construction to the factory so the underlying
// SharedIndexInformer is created once and shared across callers.
type pipelineRunInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewPipelineRunInformer constructs a new informer for PipelineRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredPipelineRunInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredPipelineRunInformer constructs a new informer for PipelineRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().PipelineRuns(namespace).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().PipelineRuns(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1.PipelineRun{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *pipelineRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredPipelineRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for PipelineRun, creating it
// on first use via defaultInformer.
func (f *pipelineRunInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1.PipelineRun{}, f.defaultInformer)
}
// Lister returns a PipelineRunLister backed by the shared informer's indexer.
func (f *pipelineRunInformer) Lister() pipelinev1.PipelineRunLister {
return pipelinev1.NewPipelineRunLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
context "context"
time "time"
apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// TaskInformer provides access to a shared informer and lister for
// Tasks.
type TaskInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1.TaskLister
}
// taskInformer defers construction to the factory so the underlying
// SharedIndexInformer is created once and shared across callers.
type taskInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewTaskInformer constructs a new informer for Task type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewTaskInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredTaskInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredTaskInformer constructs a new informer for Task type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredTaskInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().Tasks(namespace).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().Tasks(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1.Task{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *taskInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredTaskInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for Task, creating it on
// first use via defaultInformer.
func (f *taskInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1.Task{}, f.defaultInformer)
}
// Lister returns a TaskLister backed by the shared informer's indexer.
func (f *taskInformer) Lister() pipelinev1.TaskLister {
return pipelinev1.NewTaskLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
context "context"
time "time"
apispipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// TaskRunInformer provides access to a shared informer and lister for
// TaskRuns.
type TaskRunInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1.TaskRunLister
}
// taskRunInformer defers construction to the factory so the underlying
// SharedIndexInformer is created once and shared across callers.
type taskRunInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewTaskRunInformer constructs a new informer for TaskRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredTaskRunInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredTaskRunInformer constructs a new informer for TaskRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().TaskRuns(namespace).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1().TaskRuns(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1.TaskRun{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *taskRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredTaskRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for TaskRun, creating it on
// first use via defaultInformer.
func (f *taskRunInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1.TaskRun{}, f.defaultInformer)
}
// Lister returns a TaskRunLister backed by the shared informer's indexer.
func (f *taskRunInformer) Lister() pipelinev1.TaskRunLister {
return pipelinev1.NewTaskRunLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// Interface provides access to all the informers in this group version.
type Interface interface {
// Runs returns a RunInformer.
Runs() RunInformer
// StepActions returns a StepActionInformer.
StepActions() StepActionInformer
// VerificationPolicies returns a VerificationPolicyInformer.
VerificationPolicies() VerificationPolicyInformer
}
// version implements Interface for the tekton.dev/v1alpha1 group version;
// each accessor stamps out a lazily-constructing informer wrapper with the
// same factory/namespace/tweak triple.
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// Runs returns a RunInformer.
func (v *version) Runs() RunInformer {
return &runInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// StepActions returns a StepActionInformer.
func (v *version) StepActions() StepActionInformer {
return &stepActionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// VerificationPolicies returns a VerificationPolicyInformer.
func (v *version) VerificationPolicies() VerificationPolicyInformer {
return &verificationPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
time "time"
apispipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// RunInformer provides access to a shared informer and lister for
// Runs.
type RunInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1alpha1.RunLister
}
// runInformer defers construction to the factory so the underlying
// SharedIndexInformer is created once and shared across callers.
type runInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewRunInformer constructs a new informer for Run type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredRunInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredRunInformer constructs a new informer for Run type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1alpha1().Runs(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1alpha1().Runs(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1alpha1.Run{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *runInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for Run, creating it on
// first use via defaultInformer.
func (f *runInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1alpha1.Run{}, f.defaultInformer)
}
// Lister returns a RunLister backed by the shared informer's indexer.
func (f *runInformer) Lister() pipelinev1alpha1.RunLister {
return pipelinev1alpha1.NewRunLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
time "time"
apispipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// StepActionInformer provides access to a shared informer and lister for
// StepActions.
type StepActionInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1alpha1.StepActionLister
}
// stepActionInformer defers construction to the factory so the underlying
// SharedIndexInformer is created once and shared across callers.
type stepActionInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewStepActionInformer constructs a new informer for StepAction type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewStepActionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredStepActionInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredStepActionInformer constructs a new informer for StepAction type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStepActionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1alpha1().StepActions(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1alpha1().StepActions(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1alpha1.StepAction{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *stepActionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredStepActionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for StepAction, creating it
// on first use via defaultInformer.
func (f *stepActionInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1alpha1.StepAction{}, f.defaultInformer)
}
// Lister returns a StepActionLister backed by the shared informer's indexer.
func (f *stepActionInformer) Lister() pipelinev1alpha1.StepActionLister {
return pipelinev1alpha1.NewStepActionLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
time "time"
apispipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// VerificationPolicyInformer provides access to a shared informer and lister for
// VerificationPolicies.
type VerificationPolicyInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1alpha1.VerificationPolicyLister
}
// verificationPolicyInformer defers construction to the factory so the
// underlying SharedIndexInformer is created once and shared across callers.
type verificationPolicyInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewVerificationPolicyInformer constructs a new informer for VerificationPolicy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewVerificationPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredVerificationPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredVerificationPolicyInformer constructs a new informer for VerificationPolicy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredVerificationPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1alpha1().VerificationPolicies(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1alpha1().VerificationPolicies(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1alpha1.VerificationPolicy{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *verificationPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredVerificationPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for VerificationPolicy,
// creating it on first use via defaultInformer.
func (f *verificationPolicyInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1alpha1.VerificationPolicy{}, f.defaultInformer)
}
// Lister returns a VerificationPolicyLister backed by the shared informer's
// indexer.
func (f *verificationPolicyInformer) Lister() pipelinev1alpha1.VerificationPolicyLister {
return pipelinev1alpha1.NewVerificationPolicyLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
time "time"
apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NOTE(review): generated by informer-gen (see file header "DO NOT EDIT");
// do not hand-edit — changes will be lost on regeneration.
//
// CustomRunInformer provides access to a shared informer and lister for
// CustomRuns.
type CustomRunInformer interface {
Informer() cache.SharedIndexInformer
Lister() pipelinev1beta1.CustomRunLister
}
// customRunInformer defers construction to the factory so the underlying
// SharedIndexInformer is created once and shared across callers.
type customRunInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewCustomRunInformer constructs a new informer for CustomRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewCustomRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredCustomRunInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredCustomRunInformer constructs a new informer for CustomRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCustomRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
// context.TODO() is used because cache.ListWatch callbacks carry no
// caller context (generated code).
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1beta1().CustomRuns(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.TektonV1beta1().CustomRuns(namespace).Watch(context.TODO(), options)
},
},
&apispipelinev1beta1.CustomRun{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the filtered informer with the standard
// namespace indexer; passed to the factory as the constructor callback.
func (f *customRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredCustomRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for CustomRun, creating it
// on first use via defaultInformer.
func (f *customRunInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apispipelinev1beta1.CustomRun{}, f.defaultInformer)
}
// Lister returns a CustomRunLister backed by the shared informer's indexer.
func (f *customRunInformer) Lister() pipelinev1beta1.CustomRunLister {
return pipelinev1beta1.NewCustomRunLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
	// CustomRuns returns a CustomRunInformer.
	CustomRuns() CustomRunInformer
	// Pipelines returns a PipelineInformer.
	Pipelines() PipelineInformer
	// PipelineRuns returns a PipelineRunInformer.
	PipelineRuns() PipelineRunInformer
	// StepActions returns a StepActionInformer.
	StepActions() StepActionInformer
	// Tasks returns a TaskInformer.
	Tasks() TaskInformer
	// TaskRuns returns a TaskRunInformer.
	TaskRuns() TaskRunInformer
}

// version implements Interface for one factory/namespace/tweak combination; every
// accessor hands those three values to the informer it constructs.
type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{
		factory:          f,
		namespace:        namespace,
		tweakListOptions: tweakListOptions,
	}
}

// CustomRuns returns a CustomRunInformer.
func (v *version) CustomRuns() CustomRunInformer {
	return &customRunInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
}

// Pipelines returns a PipelineInformer.
func (v *version) Pipelines() PipelineInformer {
	return &pipelineInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
}

// PipelineRuns returns a PipelineRunInformer.
func (v *version) PipelineRuns() PipelineRunInformer {
	return &pipelineRunInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
}

// StepActions returns a StepActionInformer.
func (v *version) StepActions() StepActionInformer {
	return &stepActionInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
}

// Tasks returns a TaskInformer.
func (v *version) Tasks() TaskInformer {
	return &taskInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
}

// TaskRuns returns a TaskRunInformer.
func (v *version) TaskRuns() TaskRunInformer {
	return &taskRunInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
time "time"
apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PipelineInformer provides access to a shared informer and lister for
// Pipelines.
type PipelineInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() pipelinev1beta1.PipelineLister
}

// pipelineInformer materializes PipelineInformer on top of a shared factory.
type pipelineInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewPipelineInformer constructs a new informer for Pipeline type.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewPipelineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredPipelineInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredPipelineInformer constructs a new informer for Pipeline type, applying the
// given tweak function to the options of every List/Watch call.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewFilteredPipelineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// applyTweaks mutates the options in place when a tweak function was supplied.
	applyTweaks := func(opts *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(opts)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(opts v1.ListOptions) (runtime.Object, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().Pipelines(namespace).List(context.TODO(), opts)
		},
		WatchFunc: func(opts v1.ListOptions) (watch.Interface, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().Pipelines(namespace).Watch(context.TODO(), opts)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apispipelinev1beta1.Pipeline{}, resyncPeriod, indexers)
}

// defaultInformer builds the Pipeline informer using the factory's namespace and tweak
// function, indexed by namespace.
func (f *pipelineInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
	return NewFilteredPipelineInformer(client, f.namespace, resyncPeriod, indexers, f.tweakListOptions)
}

// Informer returns the shared Pipeline informer, creating it on first use.
func (f *pipelineInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&apispipelinev1beta1.Pipeline{}, f.defaultInformer)
}

// Lister returns a Pipeline lister backed by the shared informer's index.
func (f *pipelineInformer) Lister() pipelinev1beta1.PipelineLister {
	return pipelinev1beta1.NewPipelineLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
time "time"
apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PipelineRunInformer provides access to a shared informer and lister for
// PipelineRuns.
type PipelineRunInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() pipelinev1beta1.PipelineRunLister
}

// pipelineRunInformer materializes PipelineRunInformer on top of a shared factory.
type pipelineRunInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewPipelineRunInformer constructs a new informer for PipelineRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredPipelineRunInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredPipelineRunInformer constructs a new informer for PipelineRun type, applying
// the given tweak function to the options of every List/Watch call.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewFilteredPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// applyTweaks mutates the options in place when a tweak function was supplied.
	applyTweaks := func(opts *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(opts)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(opts v1.ListOptions) (runtime.Object, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().PipelineRuns(namespace).List(context.TODO(), opts)
		},
		WatchFunc: func(opts v1.ListOptions) (watch.Interface, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().PipelineRuns(namespace).Watch(context.TODO(), opts)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apispipelinev1beta1.PipelineRun{}, resyncPeriod, indexers)
}

// defaultInformer builds the PipelineRun informer using the factory's namespace and tweak
// function, indexed by namespace.
func (f *pipelineRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
	return NewFilteredPipelineRunInformer(client, f.namespace, resyncPeriod, indexers, f.tweakListOptions)
}

// Informer returns the shared PipelineRun informer, creating it on first use.
func (f *pipelineRunInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&apispipelinev1beta1.PipelineRun{}, f.defaultInformer)
}

// Lister returns a PipelineRun lister backed by the shared informer's index.
func (f *pipelineRunInformer) Lister() pipelinev1beta1.PipelineRunLister {
	return pipelinev1beta1.NewPipelineRunLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
time "time"
apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// StepActionInformer provides access to a shared informer and lister for
// StepActions.
type StepActionInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() pipelinev1beta1.StepActionLister
}

// stepActionInformer materializes StepActionInformer on top of a shared factory.
type stepActionInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewStepActionInformer constructs a new informer for StepAction type.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewStepActionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredStepActionInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredStepActionInformer constructs a new informer for StepAction type, applying
// the given tweak function to the options of every List/Watch call.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewFilteredStepActionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// applyTweaks mutates the options in place when a tweak function was supplied.
	applyTweaks := func(opts *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(opts)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(opts v1.ListOptions) (runtime.Object, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().StepActions(namespace).List(context.TODO(), opts)
		},
		WatchFunc: func(opts v1.ListOptions) (watch.Interface, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().StepActions(namespace).Watch(context.TODO(), opts)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apispipelinev1beta1.StepAction{}, resyncPeriod, indexers)
}

// defaultInformer builds the StepAction informer using the factory's namespace and tweak
// function, indexed by namespace.
func (f *stepActionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
	return NewFilteredStepActionInformer(client, f.namespace, resyncPeriod, indexers, f.tweakListOptions)
}

// Informer returns the shared StepAction informer, creating it on first use.
func (f *stepActionInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&apispipelinev1beta1.StepAction{}, f.defaultInformer)
}

// Lister returns a StepAction lister backed by the shared informer's index.
func (f *stepActionInformer) Lister() pipelinev1beta1.StepActionLister {
	return pipelinev1beta1.NewStepActionLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
time "time"
apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// TaskInformer provides access to a shared informer and lister for
// Tasks.
type TaskInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() pipelinev1beta1.TaskLister
}

// taskInformer materializes TaskInformer on top of a shared factory.
type taskInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewTaskInformer constructs a new informer for Task type.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewTaskInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredTaskInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredTaskInformer constructs a new informer for Task type, applying the given
// tweak function to the options of every List/Watch call.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewFilteredTaskInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// applyTweaks mutates the options in place when a tweak function was supplied.
	applyTweaks := func(opts *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(opts)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(opts v1.ListOptions) (runtime.Object, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().Tasks(namespace).List(context.TODO(), opts)
		},
		WatchFunc: func(opts v1.ListOptions) (watch.Interface, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().Tasks(namespace).Watch(context.TODO(), opts)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apispipelinev1beta1.Task{}, resyncPeriod, indexers)
}

// defaultInformer builds the Task informer using the factory's namespace and tweak
// function, indexed by namespace.
func (f *taskInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
	return NewFilteredTaskInformer(client, f.namespace, resyncPeriod, indexers, f.tweakListOptions)
}

// Informer returns the shared Task informer, creating it on first use.
func (f *taskInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&apispipelinev1beta1.Task{}, f.defaultInformer)
}

// Lister returns a Task lister backed by the shared informer's index.
func (f *taskInformer) Lister() pipelinev1beta1.TaskLister {
	return pipelinev1beta1.NewTaskLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
time "time"
apispipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// TaskRunInformer provides access to a shared informer and lister for
// TaskRuns.
type TaskRunInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() pipelinev1beta1.TaskRunLister
}

// taskRunInformer materializes TaskRunInformer on top of a shared factory.
type taskRunInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewTaskRunInformer constructs a new informer for TaskRun type.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredTaskRunInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredTaskRunInformer constructs a new informer for TaskRun type, applying the
// given tweak function to the options of every List/Watch call.
// Always prefer using an informer factory to get a shared informer instead of getting an
// independent one; sharing reduces memory footprint and connections to the API server.
func NewFilteredTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// applyTweaks mutates the options in place when a tweak function was supplied.
	applyTweaks := func(opts *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(opts)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(opts v1.ListOptions) (runtime.Object, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().TaskRuns(namespace).List(context.TODO(), opts)
		},
		WatchFunc: func(opts v1.ListOptions) (watch.Interface, error) {
			applyTweaks(&opts)
			return client.TektonV1beta1().TaskRuns(namespace).Watch(context.TODO(), opts)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apispipelinev1beta1.TaskRun{}, resyncPeriod, indexers)
}

// defaultInformer builds the TaskRun informer using the factory's namespace and tweak
// function, indexed by namespace.
func (f *taskRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
	return NewFilteredTaskRunInformer(client, f.namespace, resyncPeriod, indexers, f.tweakListOptions)
}

// Informer returns the shared TaskRun informer, creating it on first use.
func (f *taskRunInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&apispipelinev1beta1.TaskRun{}, f.defaultInformer)
}

// Lister returns a TaskRun lister backed by the shared informer's index.
func (f *taskRunInformer) Lister() pipelinev1beta1.TaskRunLister {
	return pipelinev1beta1.NewTaskRunLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package client
import (
context "context"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
rest "k8s.io/client-go/rest"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Default.RegisterClient(withClientFromConfig)
	injection.Default.RegisterClientFetcher(func(ctx context.Context) interface{} {
		return Get(ctx)
	})
}

// Key is used as the key for associating information with a context.Context.
type Key struct{}

// withClientFromConfig builds a clientset from the rest config and stores it on the
// context under Key; it dies if the config is unusable.
func withClientFromConfig(ctx context.Context, cfg *rest.Config) context.Context {
	cs := versioned.NewForConfigOrDie(cfg)
	return context.WithValue(ctx, Key{}, cs)
}

// Get extracts the versioned.Interface client from the context, panicking (with a hint
// about the likely cause) when no client was injected.
func Get(ctx context.Context) versioned.Interface {
	v := ctx.Value(Key{})
	if v == nil {
		msg := "Unable to fetch github.com/tektoncd/pipeline/pkg/client/clientset/versioned.Interface from context."
		if injection.GetConfig(ctx) == nil {
			// No rest config either: the caller almost certainly passed the wrong context.
			msg = "Unable to fetch github.com/tektoncd/pipeline/pkg/client/clientset/versioned.Interface from context. This context is not the application context (which is typically given to constructors via sharedmain)."
		}
		logging.FromContext(ctx).Panic(msg)
	}
	return v.(versioned.Interface)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
runtime "k8s.io/apimachinery/pkg/runtime"
rest "k8s.io/client-go/rest"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Fake.RegisterClient(withClient)
	injection.Fake.RegisterClientFetcher(func(ctx context.Context) interface{} {
		return Get(ctx)
	})
}

// withClient attaches a fresh, empty fake clientset to the context; the rest config is
// intentionally ignored.
func withClient(ctx context.Context, _ *rest.Config) context.Context {
	ctx, _ = With(ctx)
	return ctx
}

// With seeds a fake clientset with the given objects and stores it on the context under
// the real client's key, returning both the derived context and the clientset.
func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) {
	cs := fake.NewSimpleClientset(objects...)
	ctx = context.WithValue(ctx, client.Key{}, cs)
	return ctx, cs
}

// Get extracts the fake Tekton Clientset from the context.
func Get(ctx context.Context) *fake.Clientset {
	v := ctx.Value(client.Key{})
	if v == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake.Clientset from context.")
	}
	return v.(*fake.Clientset)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package factory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/informers/externalversions"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Default.RegisterInformerFactory(withInformerFactory)
}

// Key is used as the key for associating information with a context.Context.
type Key struct{}

// withInformerFactory stores a shared informer factory, scoped to the injection
// namespace when one is configured, on the context under Key.
func withInformerFactory(ctx context.Context) context.Context {
	c := client.Get(ctx)
	var opts []externalversions.SharedInformerOption
	if injection.HasNamespaceScope(ctx) {
		opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
	}
	f := externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)
	return context.WithValue(ctx, Key{}, f)
}

// Get extracts the InformerFactory from the context, panicking when none was injected.
func Get(ctx context.Context) externalversions.SharedInformerFactory {
	v := ctx.Value(Key{})
	if v == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions.SharedInformerFactory from context.")
	}
	return v.(externalversions.SharedInformerFactory)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/informers/externalversions"
fake "github.com/tektoncd/pipeline/pkg/client/injection/client/fake"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get retrieves the informer factory exactly as the real injection package does; only
// the construction path below differs (fake client instead of a real one).
var Get = factory.Get

func init() {
	injection.Fake.RegisterInformerFactory(withInformerFactory)
}

// withInformerFactory stores a shared informer factory backed by the fake client, scoped
// to the injection namespace when one is configured, on the context under the real
// factory's key.
func withInformerFactory(ctx context.Context) context.Context {
	c := fake.Get(ctx)
	var opts []externalversions.SharedInformerOption
	if injection.HasNamespaceScope(ctx) {
		opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
	}
	f := externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)
	return context.WithValue(ctx, factory.Key{}, f)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fakeFilteredFactory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/informers/externalversions"
fake "github.com/tektoncd/pipeline/pkg/client/injection/client/fake"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get retrieves a selector-keyed informer factory exactly as the real injection package
// does; only the construction path below differs (fake client instead of a real one).
var Get = filtered.Get

func init() {
	injection.Fake.RegisterInformerFactory(withInformerFactory)
}

// withInformerFactory stores one label-filtered, fake-client-backed shared informer
// factory per configured selector on the context, each under the real package's
// selector-specific key. It panics when WithSelectors was never called.
func withInformerFactory(ctx context.Context) context.Context {
	c := fake.Get(ctx)
	raw := ctx.Value(filtered.LabelKey{})
	if raw == nil {
		logging.FromContext(ctx).Panic("Unable to fetch labelkey from context.")
	}
	for _, selector := range raw.([]string) {
		selectorVal := selector // capture a per-iteration copy for the tweak closure
		var opts []externalversions.SharedInformerOption
		if injection.HasNamespaceScope(ctx) {
			opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
		}
		opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) {
			l.LabelSelector = selectorVal
		}))
		ctx = context.WithValue(ctx, filtered.Key{Selector: selectorVal},
			externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
	}
	return ctx
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filteredFactory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/informers/externalversions"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Default.RegisterInformerFactory(withInformerFactory)
}

// Key is used as the key for associating information with a context.Context.
type Key struct {
	Selector string
}

// LabelKey is the context key under which the configured label selectors are stored.
type LabelKey struct{}

// WithSelectors records the label selectors for which filtered informer factories
// should later be built by withInformerFactory.
func WithSelectors(ctx context.Context, selector ...string) context.Context {
	return context.WithValue(ctx, LabelKey{}, selector)
}

// withInformerFactory stores one label-filtered shared informer factory per configured
// selector on the context, each under a Key carrying that selector. It panics when
// WithSelectors was never called.
func withInformerFactory(ctx context.Context) context.Context {
	c := client.Get(ctx)
	raw := ctx.Value(LabelKey{})
	if raw == nil {
		logging.FromContext(ctx).Panic("Unable to fetch labelkey from context.")
	}
	for _, selector := range raw.([]string) {
		selectorVal := selector // capture a per-iteration copy for the tweak closure
		var opts []externalversions.SharedInformerOption
		if injection.HasNamespaceScope(ctx) {
			opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
		}
		opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) {
			l.LabelSelector = selectorVal
		}))
		ctx = context.WithValue(ctx, Key{Selector: selectorVal},
			externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
	}
	return ctx
}

// Get extracts the InformerFactory built for the given selector from the context,
// panicking when no factory exists for that selector.
func Get(ctx context.Context, selector string) externalversions.SharedInformerFactory {
	v := ctx.Value(Key{Selector: selector})
	if v == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions.SharedInformerFactory with selector %s from context.", selector)
	}
	return v.(externalversions.SharedInformerFactory)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
pipeline "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is an alias for the real (non-fake) package's Get, so tests resolve
// the same typed informer accessor.
var Get = pipeline.Get

func init() {
	// Register this informer constructor with the fake injection context
	// used in tests.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer builds the Pipeline informer from the fake factory, stores it
// in the context under the real package's Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1().Pipelines()
	return context.WithValue(ctx, pipeline.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is an alias for the real (non-fake) filtered package's Get.
var Get = filtered.Get

func init() {
	// Register with the fake injection context used in tests.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer attaches one filtered Pipeline informer per label selector
// found in the context (panicking if none were registered via the filtered
// factory's WithSelectors), keyed by the real package's filtered.Key.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1().Pipelines()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this filtered informer constructor with the default
	// (non-fake) injection context at package load time.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct {
	Selector string
}

// withInformer attaches one Pipeline informer per label selector found in
// the context (panicking if none were registered via the filtered factory's
// WithSelectors) and returns the informers so the framework can start them.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1().Pipelines()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1.PipelineInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.PipelineInformer with selector %s from context.", selector)
	}
	return untyped.(v1.PipelineInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this informer constructor with the default (non-fake)
	// injection context at package load time.
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer builds the Pipeline informer from the injected factory,
// stores it in the context under Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1().Pipelines()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1.PipelineInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.PipelineInformer from context.")
	}
	return untyped.(v1.PipelineInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is an alias for the real (non-fake) package's Get, so tests resolve
// the same typed informer accessor.
var Get = pipelinerun.Get

func init() {
	// Register this informer constructor with the fake injection context
	// used in tests.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer builds the PipelineRun informer from the fake factory, stores
// it in the context under the real package's Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1().PipelineRuns()
	return context.WithValue(ctx, pipelinerun.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is an alias for the real (non-fake) filtered package's Get.
var Get = filtered.Get

func init() {
	// Register with the fake injection context used in tests.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer attaches one filtered PipelineRun informer per label selector
// found in the context (panicking if none were registered via the filtered
// factory's WithSelectors), keyed by the real package's filtered.Key.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1().PipelineRuns()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this filtered informer constructor with the default
	// (non-fake) injection context at package load time.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct {
	Selector string
}

// withInformer attaches one PipelineRun informer per label selector found in
// the context (panicking if none were registered via the filtered factory's
// WithSelectors) and returns the informers so the framework can start them.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1().PipelineRuns()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1.PipelineRunInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.PipelineRunInformer with selector %s from context.", selector)
	}
	return untyped.(v1.PipelineRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this informer constructor with the default (non-fake)
	// injection context at package load time.
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer builds the PipelineRun informer from the injected factory,
// stores it in the context under Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1().PipelineRuns()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1.PipelineRunInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.PipelineRunInformer from context.")
	}
	return untyped.(v1.PipelineRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
task "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is an alias for the real (non-fake) package's Get, so tests resolve
// the same typed informer accessor.
var Get = task.Get

func init() {
	// Register this informer constructor with the fake injection context
	// used in tests.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer builds the Task informer from the fake factory, stores it in
// the context under the real package's Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1().Tasks()
	return context.WithValue(ctx, task.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is an alias for the real (non-fake) filtered package's Get.
var Get = filtered.Get

func init() {
	// Register with the fake injection context used in tests.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer attaches one filtered Task informer per label selector found
// in the context (panicking if none were registered via the filtered
// factory's WithSelectors), keyed by the real package's filtered.Key.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1().Tasks()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this filtered informer constructor with the default
	// (non-fake) injection context at package load time.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct {
	Selector string
}

// withInformer attaches one Task informer per label selector found in the
// context (panicking if none were registered via the filtered factory's
// WithSelectors) and returns the informers so the framework can start them.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1().Tasks()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1.TaskInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.TaskInformer with selector %s from context.", selector)
	}
	return untyped.(v1.TaskInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this informer constructor with the default (non-fake)
	// injection context at package load time.
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer builds the Task informer from the injected factory, stores it
// in the context under Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1().Tasks()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1.TaskInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.TaskInformer from context.")
	}
	return untyped.(v1.TaskInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is an alias for the real (non-fake) package's Get, so tests resolve
// the same typed informer accessor.
var Get = taskrun.Get

func init() {
	// Register this informer constructor with the fake injection context
	// used in tests.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer builds the TaskRun informer from the fake factory, stores it
// in the context under the real package's Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1().TaskRuns()
	return context.WithValue(ctx, taskrun.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is an alias for the real (non-fake) filtered package's Get.
var Get = filtered.Get

func init() {
	// Register with the fake injection context used in tests.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer attaches one filtered TaskRun informer per label selector
// found in the context (panicking if none were registered via the filtered
// factory's WithSelectors), keyed by the real package's filtered.Key.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1().TaskRuns()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this filtered informer constructor with the default
	// (non-fake) injection context at package load time.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct {
	Selector string
}

// withInformer attaches one TaskRun informer per label selector found in the
// context (panicking if none were registered via the filtered factory's
// WithSelectors) and returns the informers so the framework can start them.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1().TaskRuns()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1.TaskRunInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.TaskRunInformer with selector %s from context.", selector)
	}
	return untyped.(v1.TaskRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
context "context"
v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this informer constructor with the default (non-fake)
	// injection context at package load time.
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer builds the TaskRun informer from the injected factory, stores
// it in the context under Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1().TaskRuns()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1.TaskRunInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1.TaskRunInformer from context.")
	}
	return untyped.(v1.TaskRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
run "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/run"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is an alias for the real (non-fake) package's Get, so tests resolve
// the same typed informer accessor.
var Get = run.Get

func init() {
	// Register this informer constructor with the fake injection context
	// used in tests.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer builds the v1alpha1 Run informer from the fake factory,
// stores it in the context under the real package's Key, and returns it for
// startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1alpha1().Runs()
	return context.WithValue(ctx, run.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/run/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is an alias for the real (non-fake) filtered package's Get.
var Get = filtered.Get

func init() {
	// Register with the fake injection context used in tests.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer attaches one filtered v1alpha1 Run informer per label
// selector found in the context (panicking if none were registered via the
// filtered factory's WithSelectors), keyed by the real package's
// filtered.Key.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1alpha1().Runs()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this filtered informer constructor with the default
	// (non-fake) injection context at package load time.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct {
	Selector string
}

// withInformer attaches one v1alpha1 Run informer per label selector found
// in the context (panicking if none were registered via the filtered
// factory's WithSelectors) and returns the informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1alpha1().Runs()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1alpha1.RunInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.RunInformer with selector %s from context.", selector)
	}
	return untyped.(v1alpha1.RunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package run
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register this informer constructor with the default (non-fake)
	// injection context at package load time.
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer builds the v1alpha1 Run informer from the injected factory,
// stores it in the context under Key, and returns it for startup.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1alpha1().Runs()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1alpha1.RunInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.RunInformer from context.")
	}
	return untyped.(v1alpha1.RunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
stepaction "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/stepaction"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is re-exported from the real injection package so tests resolve the
// informer via the same context Key.
var Get = stepaction.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterInformer(withInformer)
}
// withInformer mirrors the real package's setup, but sources the StepAction
// informer from the fake factory.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Tekton().V1alpha1().StepActions()
return context.WithValue(ctx, stepaction.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/stepaction/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is re-exported from the real filtered injection package so tests
// resolve informers via the same selector-specific Key.
var Get = filtered.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterFilteredInformers(withInformer)
}
// withInformer mirrors the real filtered package's setup, but sources one
// StepAction informer per label selector from the fake filtered factory.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Tekton().V1alpha1().StepActions()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so filtered
// StepAction informers are set up when injection starts.
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
// Selector is the label selector this informer instance is filtered by.
Selector string
}
// withInformer constructs one filtered StepAction informer per label selector
// found in the context, stashing each typed informer under a
// selector-specific Key and returning the untyped informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Tekton().V1alpha1().StepActions()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1alpha1.StepActionInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
// Panic: injection was never configured with this selector.
logging.FromContext(ctx).Panicf(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.StepActionInformer with selector %s from context.", selector)
}
return untyped.(v1alpha1.StepActionInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so the
// StepAction informer is set up alongside the other injected informers.
func init() {
injection.Default.RegisterInformer(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}
// withInformer obtains the StepAction informer from the shared factory in
// ctx, stores the typed informer under Key, and returns the untyped informer
// for the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := factory.Get(ctx)
inf := f.Tekton().V1alpha1().StepActions()
return context.WithValue(ctx, Key{}, inf), inf.Informer()
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1alpha1.StepActionInformer {
untyped := ctx.Value(Key{})
if untyped == nil {
// Panic: injection was never configured for this informer.
logging.FromContext(ctx).Panic(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.StepActionInformer from context.")
}
return untyped.(v1alpha1.StepActionInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
verificationpolicy "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is re-exported from the real injection package so tests resolve the
// informer via the same context Key.
var Get = verificationpolicy.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterInformer(withInformer)
}
// withInformer mirrors the real package's setup, but sources the
// VerificationPolicy informer from the fake factory.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Tekton().V1alpha1().VerificationPolicies()
return context.WithValue(ctx, verificationpolicy.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is re-exported from the real filtered injection package so tests
// resolve informers via the same selector-specific Key.
var Get = filtered.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterFilteredInformers(withInformer)
}
// withInformer mirrors the real filtered package's setup, but sources one
// VerificationPolicy informer per label selector from the fake filtered
// factory.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Tekton().V1alpha1().VerificationPolicies()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so filtered
// VerificationPolicy informers are set up when injection starts.
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
// Selector is the label selector this informer instance is filtered by.
Selector string
}
// withInformer constructs one filtered VerificationPolicy informer per label
// selector found in the context, stashing each typed informer under a
// selector-specific Key and returning the untyped informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Tekton().V1alpha1().VerificationPolicies()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1alpha1.VerificationPolicyInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
// Panic: injection was never configured with this selector.
logging.FromContext(ctx).Panicf(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.VerificationPolicyInformer with selector %s from context.", selector)
}
return untyped.(v1alpha1.VerificationPolicyInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package verificationpolicy
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so the
// VerificationPolicy informer is set up alongside the other injected
// informers.
func init() {
injection.Default.RegisterInformer(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}
// withInformer obtains the VerificationPolicy informer from the shared
// factory in ctx, stores the typed informer under Key, and returns the
// untyped informer for the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := factory.Get(ctx)
inf := f.Tekton().V1alpha1().VerificationPolicies()
return context.WithValue(ctx, Key{}, inf), inf.Informer()
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1alpha1.VerificationPolicyInformer {
untyped := ctx.Value(Key{})
if untyped == nil {
// Panic: injection was never configured for this informer.
logging.FromContext(ctx).Panic(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.VerificationPolicyInformer from context.")
}
return untyped.(v1alpha1.VerificationPolicyInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package customrun
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so the
// CustomRun informer is set up alongside the other injected informers.
func init() {
injection.Default.RegisterInformer(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}
// withInformer obtains the CustomRun informer from the shared factory in
// ctx, stores the typed informer under Key, and returns the untyped informer
// for the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := factory.Get(ctx)
inf := f.Tekton().V1beta1().CustomRuns()
return context.WithValue(ctx, Key{}, inf), inf.Informer()
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1beta1.CustomRunInformer {
untyped := ctx.Value(Key{})
if untyped == nil {
// Panic: injection was never configured for this informer.
logging.FromContext(ctx).Panic(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.CustomRunInformer from context.")
}
return untyped.(v1beta1.CustomRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
customrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is re-exported from the real injection package so tests resolve the
// informer via the same context Key.
var Get = customrun.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterInformer(withInformer)
}
// withInformer mirrors the real package's setup, but sources the CustomRun
// informer from the fake factory.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Tekton().V1beta1().CustomRuns()
return context.WithValue(ctx, customrun.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so filtered
// CustomRun informers are set up when injection starts.
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
// Selector is the label selector this informer instance is filtered by.
Selector string
}
// withInformer constructs one filtered CustomRun informer per label selector
// found in the context, stashing each typed informer under a
// selector-specific Key and returning the untyped informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Tekton().V1beta1().CustomRuns()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1beta1.CustomRunInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
// Panic: injection was never configured with this selector.
logging.FromContext(ctx).Panicf(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.CustomRunInformer with selector %s from context.", selector)
}
return untyped.(v1beta1.CustomRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is re-exported from the real filtered injection package so tests
// resolve informers via the same selector-specific Key.
var Get = filtered.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterFilteredInformers(withInformer)
}
// withInformer mirrors the real filtered package's setup, but sources one
// CustomRun informer per label selector from the fake filtered factory.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Tekton().V1beta1().CustomRuns()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
pipeline "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipeline"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is re-exported from the real injection package so tests resolve the
// informer via the same context Key.
var Get = pipeline.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterInformer(withInformer)
}
// withInformer mirrors the real package's setup, but sources the Pipeline
// informer from the fake factory.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Tekton().V1beta1().Pipelines()
return context.WithValue(ctx, pipeline.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipeline/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is re-exported from the real filtered injection package so tests
// resolve informers via the same selector-specific Key.
var Get = filtered.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterFilteredInformers(withInformer)
}
// withInformer mirrors the real filtered package's setup, but sources one
// Pipeline informer per label selector from the fake filtered factory.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Tekton().V1beta1().Pipelines()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so filtered
// Pipeline informers are set up when injection starts.
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
// Selector is the label selector this informer instance is filtered by.
Selector string
}
// withInformer constructs one filtered Pipeline informer per label selector
// found in the context, stashing each typed informer under a
// selector-specific Key and returning the untyped informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Tekton().V1beta1().Pipelines()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1beta1.PipelineInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
// Panic: injection was never configured with this selector.
logging.FromContext(ctx).Panicf(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.PipelineInformer with selector %s from context.", selector)
}
return untyped.(v1beta1.PipelineInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so the
// Pipeline informer is set up alongside the other injected informers.
func init() {
injection.Default.RegisterInformer(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}
// withInformer obtains the Pipeline informer from the shared factory in ctx,
// stores the typed informer under Key, and returns the untyped informer for
// the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := factory.Get(ctx)
inf := f.Tekton().V1beta1().Pipelines()
return context.WithValue(ctx, Key{}, inf), inf.Informer()
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context) v1beta1.PipelineInformer {
untyped := ctx.Value(Key{})
if untyped == nil {
// Panic: injection was never configured for this informer.
logging.FromContext(ctx).Panic(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.PipelineInformer from context.")
}
return untyped.(v1beta1.PipelineInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get is re-exported from the real injection package so tests resolve the
// informer via the same context Key.
var Get = pipelinerun.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterInformer(withInformer)
}
// withInformer mirrors the real package's setup, but sources the PipelineRun
// informer from the fake factory.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Tekton().V1beta1().PipelineRuns()
return context.WithValue(ctx, pipelinerun.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get is re-exported from the real filtered injection package so tests
// resolve informers via the same selector-specific Key.
var Get = filtered.Get
// init registers withInformer with the fake injection context used in tests.
func init() {
injection.Fake.RegisterFilteredInformers(withInformer)
}
// withInformer mirrors the real filtered package's setup, but sources one
// PipelineRun informer per label selector from the fake filtered factory.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Tekton().V1beta1().PipelineRuns()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default injection context so filtered
// PipelineRun informers are set up when injection starts.
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
// Selector is the label selector this informer instance is filtered by.
Selector string
}
// withInformer constructs one filtered PipelineRun informer per label
// selector found in the context, stashing each typed informer under a
// selector-specific Key and returning the untyped informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
// No label selectors were registered with the filtered factory.
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Tekton().V1beta1().PipelineRuns()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1beta1.PipelineRunInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
// Panic: injection was never configured with this selector.
logging.FromContext(ctx).Panicf(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.PipelineRunInformer with selector %s from context.", selector)
}
return untyped.(v1beta1.PipelineRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default (non-fake) injection
// framework; importing this package wires the PipelineRun informer into
// injection-based controllers.
func init() {
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer derives the PipelineRun informer from the shared informer
// factory in ctx and stores the typed informer back into ctx so Get can
// retrieve it later.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1beta1().PipelineRuns()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context. It panics when the
// informer was never injected (i.e. withInformer did not run).
func Get(ctx context.Context) v1beta1.PipelineRunInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.PipelineRunInformer from context.")
	}
	return untyped.(v1beta1.PipelineRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
stepaction "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get re-exports the real package's accessor: the fake variant stores its
// informer under the same context key (stepaction.Key{}), so lookups are
// shared between the real and fake wiring.
var Get = stepaction.Get

// init registers withInformer with the fake injection framework used in tests.
func init() {
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer derives a StepAction informer from the fake factory and
// stores it in ctx under the real package's key.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1beta1().StepActions()
	return context.WithValue(ctx, stepaction.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get re-exports the real filtered package's accessor: the fake variant
// stores informers under the same keys (filtered.Key{Selector}), so lookups
// are shared between real and fake wiring.
var Get = filtered.Get

// init registers withInformer with the fake injection framework used in tests.
func init() {
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer builds one filtered StepAction informer per label selector
// found in ctx (stored under factoryfiltered.LabelKey{}) and stashes each
// typed informer back into ctx under the real package's Key. It panics when
// the selector list is absent, which indicates a mis-configured test setup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		// Fetch the filtered factory associated with this selector.
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1beta1().StepActions()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default (non-fake) injection
// framework; importing this package makes the filtered StepAction informers
// available to injection-based controllers.
func init() {
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
// Each label selector gets its own informer, hence its own Key value.
type Key struct {
	Selector string
}

// withInformer builds one filtered StepAction informer per label selector
// found in ctx (stored under filtered.LabelKey{} by the filtered
// informer-factory setup) and stashes each typed informer back into ctx
// under Key{Selector: ...} for later retrieval via Get. It panics when the
// selector list is absent, which indicates a mis-configured injection chain.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		// Fetch the filtered factory associated with this selector.
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1beta1().StepActions()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer for the given selector from the context.
// It panics when no informer was injected for that selector.
func Get(ctx context.Context, selector string) v1beta1.StepActionInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.StepActionInformer with selector %s from context.", selector)
	}
	return untyped.(v1beta1.StepActionInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default (non-fake) injection
// framework; importing this package wires the StepAction informer into
// injection-based controllers.
func init() {
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer derives the StepAction informer from the shared informer
// factory in ctx and stores the typed informer back into ctx so Get can
// retrieve it later.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1beta1().StepActions()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context. It panics when the
// informer was never injected (i.e. withInformer did not run).
func Get(ctx context.Context) v1beta1.StepActionInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.StepActionInformer from context.")
	}
	return untyped.(v1beta1.StepActionInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
task "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/task"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get re-exports the real package's accessor: the fake variant stores its
// informer under the same context key (task.Key{}), so lookups are shared
// between the real and fake wiring.
var Get = task.Get

// init registers withInformer with the fake injection framework used in tests.
func init() {
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer derives a Task informer from the fake factory and stores it
// in ctx under the real package's key.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1beta1().Tasks()
	return context.WithValue(ctx, task.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/task/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get re-exports the real filtered package's accessor: the fake variant
// stores informers under the same keys (filtered.Key{Selector}), so lookups
// are shared between real and fake wiring.
var Get = filtered.Get

// init registers withInformer with the fake injection framework used in tests.
func init() {
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer builds one filtered Task informer per label selector found
// in ctx (stored under factoryfiltered.LabelKey{}) and stashes each typed
// informer back into ctx under the real package's Key. It panics when the
// selector list is absent, which indicates a mis-configured test setup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		// Fetch the filtered factory associated with this selector.
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1beta1().Tasks()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default (non-fake) injection
// framework; importing this package makes the filtered Task informers
// available to injection-based controllers.
func init() {
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
// Each label selector gets its own informer, hence its own Key value.
type Key struct {
	Selector string
}

// withInformer builds one filtered Task informer per label selector found
// in ctx (stored under filtered.LabelKey{} by the filtered informer-factory
// setup) and stashes each typed informer back into ctx under
// Key{Selector: ...} for later retrieval via Get. It panics when the
// selector list is absent, which indicates a mis-configured injection chain.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		// Fetch the filtered factory associated with this selector.
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1beta1().Tasks()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer for the given selector from the context.
// It panics when no informer was injected for that selector.
func Get(ctx context.Context, selector string) v1beta1.TaskInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.TaskInformer with selector %s from context.", selector)
	}
	return untyped.(v1beta1.TaskInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default (non-fake) injection
// framework; importing this package wires the Task informer into
// injection-based controllers.
func init() {
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer derives the Task informer from the shared informer factory
// in ctx and stores the typed informer back into ctx so Get can retrieve
// it later.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1beta1().Tasks()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context. It panics when the
// informer was never injected (i.e. withInformer did not run).
func Get(ctx context.Context) v1beta1.TaskInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.TaskInformer from context.")
	}
	return untyped.(v1beta1.TaskInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get re-exports the real package's accessor: the fake variant stores its
// informer under the same context key (taskrun.Key{}), so lookups are
// shared between the real and fake wiring.
var Get = taskrun.Get

// init registers withInformer with the fake injection framework used in tests.
func init() {
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer derives a TaskRun informer from the fake factory and stores
// it in ctx under the real package's key.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Tekton().V1beta1().TaskRuns()
	return context.WithValue(ctx, taskrun.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get re-exports the real filtered package's accessor: the fake variant
// stores informers under the same keys (filtered.Key{Selector}), so lookups
// are shared between real and fake wiring.
var Get = filtered.Get

// init registers withInformer with the fake injection framework used in tests.
func init() {
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer builds one filtered TaskRun informer per label selector
// found in ctx (stored under factoryfiltered.LabelKey{}) and stashes each
// typed informer back into ctx under the real package's Key. It panics when
// the selector list is absent, which indicates a mis-configured test setup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		// Fetch the filtered factory associated with this selector.
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Tekton().V1beta1().TaskRuns()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default (non-fake) injection
// framework; importing this package makes the filtered TaskRun informers
// available to injection-based controllers.
func init() {
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
// Each label selector gets its own informer, hence its own Key value.
type Key struct {
	Selector string
}

// withInformer builds one filtered TaskRun informer per label selector
// found in ctx (stored under filtered.LabelKey{} by the filtered
// informer-factory setup) and stashes each typed informer back into ctx
// under Key{Selector: ...} for later retrieval via Get. It panics when the
// selector list is absent, which indicates a mis-configured injection chain.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		// Fetch the filtered factory associated with this selector.
		f := filtered.Get(ctx, selector)
		inf := f.Tekton().V1beta1().TaskRuns()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer for the given selector from the context.
// It panics when no informer was injected for that selector.
func Get(ctx context.Context, selector string) v1beta1.TaskRunInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.TaskRunInformer with selector %s from context.", selector)
	}
	return untyped.(v1beta1.TaskRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with the default (non-fake) injection
// framework; importing this package wires the TaskRun informer into
// injection-based controllers.
func init() {
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer derives the TaskRun informer from the shared informer
// factory in ctx and stores the typed informer back into ctx so Get can
// retrieve it later.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1beta1().TaskRuns()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context. It panics when the
// informer was never injected (i.e. withInformer did not run).
func Get(ctx context.Context) v1beta1.TaskRunInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.TaskRunInformer from context.")
	}
	return untyped.(v1beta1.TaskRunInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
pipeline "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is used as the event-source component name
	// unless overridden via controller.Options.AgentName.
	defaultControllerAgentName = "pipeline-controller"
	// defaultFinalizerName is used unless overridden via
	// controller.Options.FinalizerName.
	defaultFinalizerName = "pipelines.tekton.dev"
)

// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	pipelineInformer := pipeline.Get(ctx)

	lister := pipelineInformer.Lister()

	// Hooks that the options below may override; defaults are
	// accept-everything / no-op.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				// On promotion, enqueue every listed Pipeline (subject to the
				// optional filter) so the new leader reconciles the full set.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive a stable, dot-separated name from the concrete reconciler type;
	// it becomes the work-queue name and a structured log field.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.Pipeline"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	// The recorder is created after the options loop so AgentName overrides
	// take effect.
	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}

// createRecorder returns the event recorder already carried by ctx, or, when
// none is present, builds a broadcaster-backed recorder that both logs events
// and records them to the Kubernetes Events API. The broadcaster's watches
// are stopped when ctx is cancelled.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		go func() {
			// Stop the watches once the context is done so the broadcaster's
			// goroutines do not leak past the controller's lifetime.
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}

	return recorder
}

// init adds the Tekton API types to the global client-go scheme so that
// events (and other machinery using scheme.Scheme) can resolve them.
// NOTE(review): the AddToScheme error is discarded here; the generator
// assumes registration cannot fail for these types.
func init() {
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
context "context"
json "encoding/json"
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.Pipeline.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1.Pipeline. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1.Pipeline) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1.Pipeline.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1.Pipeline. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1.Pipeline) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.Pipeline if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1.Pipeline.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1.Pipeline) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, FinalizeKind and
// ObserveKind; one of them is selected per resource during Reconcile.
type doReconcile func(ctx context.Context, o *v1.Pipeline) reconciler.Event

// reconcilerImpl implements controller.Reconciler for v1.Pipeline resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1.PipelineLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)

// NewReconciler wraps r in the generated leader-aware reconciler plumbing.
// At most one controller.Options may be supplied; it can override the config
// store, the finalizer name, and the demote hook.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1.PipelineLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion, enqueue every listed Pipeline so the new leader
			// reconciles the complete set.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply any optional overrides from the (at most one) options struct.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler. It looks up the Pipeline named
// by key, dispatches to ReconcileKind/FinalizeKind/ObserveKind depending on
// leadership and deletion state, manages the finalizer, and records any
// reconciler.Event that was returned.
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An invalid key can never become valid, so don't requeue it.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.Pipelines(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
//
// Only r.finalizerName is synchronized; any other finalizers on the resource
// are left untouched. The write is issued as a JSON merge patch that pins
// resourceVersion, so a concurrent modification makes the patch fail rather
// than silently clobbering it.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.Pipeline, desiredFinalizers sets.Set[string]) (*v1.Pipeline, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1().Pipelines(resource.Namespace)
	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on resource when
// the underlying reconciler opts into finalization by implementing Finalizer.
// It is a no-op for reconcilers without a FinalizeKind.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.Pipeline) (*v1.Pipeline, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		// The reconciler does not use finalizers; nothing to synchronize.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	// Only mark the finalizer while the resource is still live; once the
	// deletion timestamp is set, removal is handled by clearFinalizer.
	if resource.GetDeletionTimestamp().IsZero() {
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes r.finalizerName from resource once finalization has
// completed successfully (reconcileEvent is nil or a Normal event). It is a
// no-op when the reconciler has no FinalizeKind or the resource is not being
// deleted.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.Pipeline, reconcileEvent reconciler.Event) (*v1.Pipeline, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not marked for deletion; leave finalizers as they are.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	switch {
	case reconcileEvent == nil:
		// Clean finalization: drop our finalizer.
		desired.Delete(r.finalizerName)
	default:
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == corev1.EventTypeNormal {
			// A Normal event also counts as successful finalization.
			desired.Delete(r.finalizerName)
		}
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// It is computed once per Reconcile call by newState and is read-only after.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader for this key.
	isLeader bool
}
// newState builds the per-run reconciliation state for key: it splits the
// queue key into namespace/name, captures whether the reconciler is
// read-only, and records whether this replica leads for the key.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	ns, n, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	observer, observes := r.reconciler.(ReadOnlyInterface)
	leads := r.IsLeaderFor(types.NamespacedName{Namespace: ns, Name: n})

	return &state{
		key:        key,
		namespace:  ns,
		name:       n,
		reconciler: r.reconciler,
		roi:        observer,
		isROI:      observes,
		isLeader:   leads,
	}, nil
}
// isNotLeaderNorObserver checks to see if this reconciler with the current
// state is enabled to do any work or not.
// It returns true when there is no work possible for the reconciler: this
// replica is not the leader for the key and the reconciler does not
// implement the ReadOnly observer interface.
func (s *state) isNotLeaderNorObserver() bool {
	return !(s.isLeader || s.isROI)
}
// reconcileMethodFor selects which typed callback applies to o in the current
// state: ReconcileKind for live resources when leading, ObserveKind for live
// resources when only observing, and FinalizeKind for deleting resources when
// leading and the reconciler implements Finalizer. Otherwise it returns
// ("unknown", nil).
func (s *state) reconcileMethodFor(o *v1.Pipeline) (string, doReconcile) {
	switch {
	case o.GetDeletionTimestamp().IsZero():
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		}
		if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	case s.isLeader:
		if fin, ok := s.reconciler.(Finalizer); ok {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the component name used when recording
	// Kubernetes events, unless overridden via controller options.
	defaultControllerAgentName = "pipelinerun-controller"
	// defaultFinalizerName is the finalizer synchronized on PipelineRun
	// resources when the reconciler implements Finalizer, unless overridden.
	defaultFinalizerName = "pipelineruns.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	pipelinerunInformer := pipelinerun.Get(ctx)

	lister := pipelinerunInformer.Lister()

	// promoteFilterFunc and promoteFunc default to no-ops; the options
	// functions below may replace them. Both are captured by the
	// PromoteFunc closure, so later assignments take effect on promotion.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion to leader for a bucket, notify the optional
			// promote hook and enqueue every (optionally filtered)
			// PipelineRun known to the lister.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue name from the concrete reconciler type,
	// e.g. "github.com.tektoncd...Reconciler".
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.PipelineRun"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder injected into ctx, if any.
// Otherwise it builds a new broadcaster-backed recorder that both logs
// events and writes them to the Kubernetes Events API; the broadcaster's
// watches are stopped when ctx is cancelled.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		// Stop the broadcaster's watches when the context is done so the
		// logging/sink goroutines do not leak.
		go func() {
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}

	return recorder
}
// init registers the Tekton API types with the global scheme so event
// recording (see createRecorder) can resolve their kinds.
func init() {
	// The error return was previously ignored. AddToScheme only fails on
	// programmer error (e.g. conflicting type registrations), so surface
	// it loudly at startup instead of silently mis-registering types.
	if err := versionedscheme.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
context "context"
json "encoding/json"
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
corev1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.PipelineRun.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1.PipelineRun. Any changes
	// to the object's .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1.PipelineRun) reconciler.Event
}
// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1.PipelineRun. Implementing it causes the generated
// reconciler to manage a finalizer on the resource.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1.PipelineRun. Any changes
	// to the object's .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1.PipelineRun) reconciler.Event
}
// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.PipelineRun if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1.PipelineRun.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1.PipelineRun) reconciler.Event
}

// doReconcile is the common signature shared by ReconcileKind, FinalizeKind
// and ObserveKind; state.reconcileMethodFor returns one of them.
type doReconcile func(ctx context.Context, o *v1.PipelineRun) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1.PipelineRun resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1.PipelineRunLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource after ReconcileKind returns.
	skipStatusUpdates bool
}
// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware, so the
// controller runtime can promote/demote it during leader election.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1.PipelineRun
// resources, delegating the business logic to the provided Interface
// implementation r. At most one controller.Options value may be supplied; it
// can override the config store, the finalizer name, status-update skipping,
// and the leader-election demote hook.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1.PipelineRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// When this replica is promoted to leader for a bucket, enqueue
			// every PipelineRun known to the lister so the new leader
			// reconciles all of them.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply any caller-supplied option overrides.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler. It looks up the PipelineRun
// named by key, dispatches to ReconcileKind/FinalizeKind/ObserveKind
// depending on leadership and deletion state, manages the finalizer,
// synchronizes status back to the API server, and records any
// reconciler.Event that was returned.
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An invalid key can never become valid, so don't requeue it.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.PipelineRuns(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		// We are the leader and the status changed: write it back, retrying
		// on conflict inside updateStatus.
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status to the API server, retrying on
// optimistic-concurrency conflicts. The first attempt compares against the
// informer-provided `existing`; subsequent attempts re-fetch the latest
// object so the retry is applied on top of fresh state.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1.PipelineRun, desired *v1.PipelineRun) error {
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
		// NOTE: the closure reassigns the captured `existing` on retries.
		if attempts > 0 {
			getter := r.Client.TektonV1().PipelineRuns(desired.Namespace)
			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute the (potentially expensive) diff when debug logging is on.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status

		updater := r.Client.TektonV1().PipelineRuns(existing.Namespace)
		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
//
// Only r.finalizerName is synchronized; any other finalizers on the resource
// are left untouched. The write is issued as a JSON merge patch that pins
// resourceVersion, so a concurrent modification makes the patch fail rather
// than silently clobbering it.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.PipelineRun, desiredFinalizers sets.Set[string]) (*v1.PipelineRun, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1().PipelineRuns(resource.Namespace)
	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on resource when
// the underlying reconciler opts into finalization by implementing Finalizer.
// It is a no-op for reconcilers without a FinalizeKind.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.PipelineRun) (*v1.PipelineRun, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		// The reconciler does not use finalizers; nothing to synchronize.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	// Only mark the finalizer while the resource is still live; once the
	// deletion timestamp is set, removal is handled by clearFinalizer.
	if resource.GetDeletionTimestamp().IsZero() {
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes r.finalizerName from resource once finalization has
// completed successfully (reconcileEvent is nil or a Normal event). It is a
// no-op when the reconciler has no FinalizeKind or the resource is not being
// deleted.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.PipelineRun, reconcileEvent reconciler.Event) (*v1.PipelineRun, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not marked for deletion; leave finalizers as they are.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	switch {
	case reconcileEvent == nil:
		// Clean finalization: drop our finalizer.
		desired.Delete(r.finalizerName)
	default:
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == corev1.EventTypeNormal {
			// A Normal event also counts as successful finalization.
			desired.Delete(r.finalizerName)
		}
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// It is computed once per Reconcile call by newState and is read-only after.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader for this key.
	isLeader bool
}
// newState builds the per-run reconciliation state for key: it splits the
// queue key into namespace/name, captures whether the reconciler is
// read-only, and records whether this replica leads for the key.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	ns, n, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	observer, observes := r.reconciler.(ReadOnlyInterface)
	leads := r.IsLeaderFor(types.NamespacedName{Namespace: ns, Name: n})

	return &state{
		key:        key,
		namespace:  ns,
		name:       n,
		reconciler: r.reconciler,
		roi:        observer,
		isROI:      observes,
		isLeader:   leads,
	}, nil
}
// isNotLeaderNorObserver checks to see if this reconciler with the current
// state is enabled to do any work or not.
// It returns true when there is no work possible for the reconciler: this
// replica is not the leader for the key and the reconciler does not
// implement the ReadOnly observer interface.
func (s *state) isNotLeaderNorObserver() bool {
	return !(s.isLeader || s.isROI)
}
// reconcileMethodFor selects which typed callback applies to o in the current
// state: ReconcileKind for live resources when leading, ObserveKind for live
// resources when only observing, and FinalizeKind for deleting resources when
// leading and the reconciler implements Finalizer. Otherwise it returns
// ("unknown", nil).
func (s *state) reconcileMethodFor(o *v1.PipelineRun) (string, doReconcile) {
	switch {
	case o.GetDeletionTimestamp().IsZero():
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		}
		if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	case s.isLeader:
		if fin, ok := s.reconciler.(Finalizer); ok {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
task "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the event-source component name used
	// for the recorder when no AgentName option overrides it.
	defaultControllerAgentName = "task-controller"
	// defaultFinalizerName is the finalizer managed on Tasks when the
	// reconciler implements Finalizer and no FinalizerName option is set.
	defaultFinalizerName = "tasks.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	taskInformer := task.Get(ctx)

	lister := taskInformer.Lister()

	// These two are captured by the PromoteFunc closure below and may be
	// replaced later by the options loop, so the closure always sees the
	// configured values.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				// Enqueue every Task (subject to the optional filter)
				// so the newly promoted bucket reconciles them.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue and logger names from the concrete reconciler type.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.Task"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder attached to the context if one
// exists; otherwise it creates a broadcaster-backed recorder that both logs
// events and records them to the Kubernetes API, stopping its watches when
// ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		go func() {
			// Tear down the broadcaster's watches once the context
			// is cancelled.
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}

	return recorder
}
// init registers the Tekton versioned types into the client-go scheme so
// events can be recorded against them.
// NOTE(review): the error returned by AddToScheme is ignored here — confirm
// this is acceptable for the generated registration path.
func init() {
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
context "context"
json "encoding/json"
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.Task. It is the user-provided reconciler passed
// to NewReconciler / NewImpl.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1.Task. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1.Task) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1.Task.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1.Task. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1.Task) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.Task if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1.Task.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1.Task) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, ObserveKind and
// FinalizeKind, letting Reconcile dispatch to whichever method applies.
type doReconcile func(ctx context.Context, o *v1.Task) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1.Task resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1.TaskLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1.Task resources,
// wiring the given client, lister, recorder and user reconciler together.
// At most one controller.Options struct may be supplied.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1.TaskLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// On promotion, enqueue every Task so the newly
				// promoted bucket reconciles them.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply any optional configuration overrides.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An invalid key can never be retried successfully, so drop it.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.Tasks(s.namespace)

	original, err := getter.Get(s.name)

	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)

	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
// The update is performed via a merge patch that includes the observed
// resourceVersion, so a concurrent modification makes the patch fail rather
// than clobbering another writer's finalizer changes.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.Task, desiredFinalizers sets.Set[string]) (*v1.Task, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1().Tasks(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer ensures the reconciler's finalizer is present on a
// live (non-deleting) resource when the reconciler implements Finalizer;
// otherwise it returns the resource untouched.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.Task) (*v1.Task, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		// No finalization logic configured; nothing to synchronize.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	if resource.GetDeletionTimestamp().IsZero() {
		// Resource is not being deleted, so our finalizer must be set.
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes the reconciler's finalizer from a resource that is
// being deleted, but only when finalization completed cleanly (nil event or
// an event of Normal type). Non-finalizing reconcilers and live resources
// pass through unchanged.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.Task, reconcileEvent reconciler.Event) (*v1.Task, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; the finalizer must stay in place.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	if reconcileEvent == nil {
		// Clean finalization: drop our finalizer.
		desired.Delete(r.finalizerName)
	} else {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == corev1.EventTypeNormal {
			// A Normal event also counts as a clean finalization.
			desired.Delete(r.finalizerName)
		}
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// A fresh state is built by newState for every Reconcile call.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader.
	isLeader bool
}
// newState builds the per-reconcile state for the given queue key by
// splitting it into namespace/name and capturing the leadership status and
// read-only capability of the reconciler.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	ns, nm, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	// Capture whether the reconciler can observe resources it does not lead.
	observer, observes := r.reconciler.(ReadOnlyInterface)

	return &state{
		key:        key,
		namespace:  ns,
		name:       nm,
		reconciler: r.reconciler,
		roi:        observer,
		isROI:      observes,
		isLeader: r.IsLeaderFor(types.NamespacedName{
			Namespace: ns,
			Name:      nm,
		}),
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler can do no work at
// all for the current key: it is neither the elected leader nor a read-only
// observer.
func (s *state) isNotLeaderNorObserver() bool {
	// Work is only possible when we lead the key or implement the
	// ReadOnly interface; otherwise take the fast-path out.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to the
// resource: ReconcileKind for leaders, ObserveKind for read-only observers,
// FinalizeKind for leaders when the resource is being deleted, and
// ("unknown", nil) when nothing applies.
func (s *state) reconcileMethodFor(o *v1.Task) (string, doReconcile) {
	if !o.GetDeletionTimestamp().IsZero() {
		// Deletion in progress: only a leading reconciler that also
		// implements Finalizer gets to finalize.
		if fin, ok := s.reconciler.(Finalizer); ok && s.isLeader {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
		return "unknown", nil
	}
	switch {
	case s.isLeader:
		return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
	case s.isROI:
		return reconciler.DoObserveKind, s.roi.ObserveKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the event-source component name used
	// for the recorder when no AgentName option overrides it.
	defaultControllerAgentName = "taskrun-controller"
	// defaultFinalizerName is the finalizer managed on TaskRuns when the
	// reconciler implements Finalizer and no FinalizerName option is set.
	defaultFinalizerName = "taskruns.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	taskrunInformer := taskrun.Get(ctx)

	lister := taskrunInformer.Lister()

	// These two are captured by the PromoteFunc closure below and may be
	// replaced later by the options loop, so the closure always sees the
	// configured values.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				// Enqueue every TaskRun (subject to the optional
				// filter) so the newly promoted bucket reconciles them.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue and logger names from the concrete reconciler type.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.TaskRun"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder attached to the context if one
// exists; otherwise it creates a broadcaster-backed recorder that both logs
// events and records them to the Kubernetes API, stopping its watches when
// ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		go func() {
			// Tear down the broadcaster's watches once the context
			// is cancelled.
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}

	return recorder
}
// init registers the Tekton versioned types into the client-go scheme so
// events can be recorded against them.
// NOTE(review): the error returned by AddToScheme is ignored here — confirm
// this is acceptable for the generated registration path.
func init() {
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
context "context"
json "encoding/json"
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
corev1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.TaskRun. It is the user-provided reconciler
// passed to NewReconciler / NewImpl.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1.TaskRun. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1.TaskRun) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1.TaskRun.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1.TaskRun. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1.TaskRun) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.TaskRun if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1.TaskRun.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1.TaskRun) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, ObserveKind and
// FinalizeKind, letting Reconcile dispatch to whichever method applies.
type doReconcile func(ctx context.Context, o *v1.TaskRun) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1.TaskRun resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1.TaskRunLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1.TaskRun resources,
// wiring the given client, lister, recorder and user reconciler together.
// At most one controller.Options struct may be supplied.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1.TaskRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// On promotion, enqueue every TaskRun so the newly
				// promoted bucket reconciles them.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply any optional configuration overrides.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An invalid key can never be retried successfully, so drop it.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.TaskRuns(s.namespace)

	original, err := getter.Get(s.name)

	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)

	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status back to the API server, retrying on
// optimistic-concurrency conflicts. The first attempt uses the (possibly
// stale) informer state; retries re-fetch the resource from the API.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1.TaskRun, desired *v1.TaskRun) error {
	// Work on a private copy so the caller's (informer-owned) object is
	// never mutated.
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) error {
		var err error
		// On retries, refresh our view of the resource from the API server;
		// the first attempt trusts the injectionInformer's cache.
		if attempts > 0 {
			existing, err = r.Client.TektonV1().TaskRuns(desired.Namespace).Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// Nothing changed, so skip the write entirely.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute the (potentially expensive) diff when debug logging is enabled.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, diffErr := kmp.SafeDiff(existing.Status, desired.Status); diffErr == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status
		_, err = r.Client.TektonV1().TaskRuns(existing.Namespace).UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// It only adds or removes the single finalizer named r.finalizerName; all
// other finalizers on the resource are left untouched.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.TaskRun, desiredFinalizers sets.Set[string]) (*v1.TaskRun, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer. Note: appending preserves the existing slice
		// order, unlike the removal path below which produces a sorted list.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Patch only metadata.finalizers; including resourceVersion makes the
	// merge patch fail (conflict) if the resource changed underneath us.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1().TaskRuns(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		// Surface the failure as a Kubernetes event as well as the returned error.
		r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	// NOTE(review): on patch error, `updated` is returned (typically nil) —
	// callers are expected to check the error before using the result.
	return updated, err
}
// setFinalizerIfFinalizer ensures our finalizer is present on a live resource
// when the reconciler implements Finalizer; otherwise it is a no-op.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.TaskRun) (*v1.TaskRun, error) {
	// Only reconcilers that implement Finalizer manage a finalizer at all.
	_, isFinalizer := r.reconciler.(Finalizer)
	if !isFinalizer {
		return resource, nil
	}

	// Desired set: everything currently on the resource, plus our finalizer
	// while the resource is not being deleted.
	desired := sets.New[string](resource.Finalizers...)
	if resource.GetDeletionTimestamp().IsZero() {
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes our finalizer from a resource that is being deleted,
// but only when finalization completed cleanly (nil event or a Normal
// ReconcilerEvent). Otherwise the finalizer is left so finalization retries.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.TaskRun, reconcileEvent reconciler.Event) (*v1.TaskRun, error) {
	// No Finalizer implementation means there is nothing for us to clear.
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}
	// The finalizer is only removed once the resource is actually being deleted.
	if resource.GetDeletionTimestamp().IsZero() {
		return resource, nil
	}

	// Finalization is "clean" when there was no event at all, or the event is
	// a typed ReconcilerEvent of Normal severity.
	clean := reconcileEvent == nil
	if !clean {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == corev1.EventTypeNormal {
			clean = true
		}
	}

	desired := sets.New[string](resource.Finalizers...)
	if clean {
		desired.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
fmt "fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// It caches the parsed queue key and the leadership / capability checks so
// Reconcile only computes them once.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the Interface implementation this run dispatches to.
	reconciler Interface
	// roi is the ReadOnlyInterface cast of the reconciler; only valid when
	// isROI is true.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) reports whether the reconciler can observe
	// resources it does not lead.
	isROI bool
	// isLeader reports whether this instance of the reconciler is the elected
	// leader for the resource identified by key.
	isLeader bool
}
// newState parses the queue key and captures, for this single reconciliation,
// whether we lead the resource and whether the reconciler is read-only capable.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Split the "namespace/name" queue key into its parts.
	ns, n, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	readOnly, ok := r.reconciler.(ReadOnlyInterface)

	return &state{
		key:        key,
		namespace:  ns,
		name:       n,
		reconciler: r.reconciler,
		roi:        readOnly,
		isROI:      ok,
		isLeader:   r.IsLeaderFor(types.NamespacedName{Namespace: ns, Name: n}),
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler, in its current
// state, has no work it is allowed to do: it is neither the elected leader
// for the resource nor a read-only observer.
func (s *state) isNotLeaderNorObserver() bool {
	// Not the leader and no ReadOnly interface: fast-path out.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to o in the
// current state, returning its name (for logging) and the function to invoke.
// Returns ("unknown", nil) when no method is applicable.
func (s *state) reconcileMethodFor(o *v1.TaskRun) (string, doReconcile) {
	// Resources marked for deletion are finalized (leader only); live
	// resources are reconciled by the leader or observed by non-leaders.
	if !o.GetDeletionTimestamp().IsZero() {
		if fin, ok := s.reconciler.(Finalizer); ok && s.isLeader {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
		return "unknown", nil
	}
	if s.isLeader {
		return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
	}
	if s.isROI {
		return reconciler.DoObserveKind, s.roi.ObserveKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package run
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
run "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/run"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the event-recorder component name used
	// unless overridden via controller options.
	defaultControllerAgentName = "run-controller"
	// defaultFinalizerName is the finalizer managed on Run resources when the
	// reconciler implements Finalizer and no override is supplied.
	defaultFinalizerName = "runs.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	runInformer := run.Get(ctx)

	lister := runInformer.Lister()

	// These are captured by the PromoteFunc closure below and may be replaced
	// later when the options are applied, so they must be declared first.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion for a bucket, enqueue every (optionally filtered)
			// Run so the new leader reconciles its full working set.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue / logger name from the concrete reconciler type,
	// e.g. "github.com.user.pkg.Reconciler".
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.Run"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	// The recorder depends on the (possibly overridden) agent name, so it is
	// created only after all options have been applied.
	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder from the context if one exists,
// otherwise it builds a new broadcaster-backed recorder whose sinks are shut
// down when ctx is cancelled.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	// Reuse a recorder already placed on the context, if any.
	if recorder := controller.GetEventRecorder(ctx); recorder != nil {
		return recorder
	}

	logger := logging.FromContext(ctx)

	// Create event broadcaster
	logger.Debug("Creating event broadcaster")
	broadcaster := record.NewBroadcaster()
	watches := []watch.Interface{
		broadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
		broadcaster.StartRecordingToSink(
			&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
	}

	// Stop the broadcaster's sinks once the context is done.
	go func() {
		<-ctx.Done()
		for _, w := range watches {
			w.Stop()
		}
	}()

	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
}
// init registers the versioned API types into the client-go scheme so the
// event recorder can resolve object references for the kinds we emit events
// about.
func init() {
	// A scheme registration failure indicates a programmer error (e.g.
	// conflicting type registration), so fail fast instead of silently
	// continuing with an incomplete scheme as the previous unchecked call did.
	if err := versionedscheme.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package run
import (
context "context"
json "encoding/json"
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
v1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.Run.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1alpha1.Run. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1alpha1.Run) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1alpha1.Run.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1alpha1.Run. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1alpha1.Run) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.Run if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1alpha1.Run.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1alpha1.Run) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, FinalizeKind and
// ObserveKind, allowing Reconcile to dispatch to whichever applies.
type doReconcile func(ctx context.Context, o *v1alpha1.Run) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1alpha1.Run resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1alpha1.RunLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// When set, Reconcile freezes the configuration onto the context before
	// dispatching to the user's reconciler.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	// Defaults to defaultFinalizerName; may be overridden via options.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler that dispatches to the
// given Interface implementation, wiring leader-aware promotion over the
// lister and applying at most one controller.Options struct.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1alpha1.RunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion to leader for a bucket, enqueue every Run so the
			// new leader reconciles the full set it is now responsible for.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply optional overrides from the (at most one) options struct.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler.
// It parses the queue key, fetches the Run from the lister, dispatches to
// ReconcileKind / FinalizeKind / ObserveKind depending on leadership and
// deletion state, synchronizes status back to the API server, and finally
// reports any reconciler.Event via the recorder.
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error is the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		// Malformed keys are dropped (nil), not retried: requeueing would
		// never make the key valid.
		logger.Error("Invalid resource key: ", key)
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.Runs(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		if !r.skipStatusUpdates {
			reconciler.PreProcessReconcile(ctx, resource)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

		if !r.skipStatusUpdates {
			reconciler.PostProcessReconcile(ctx, resource, original)
		}

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status back to the API server, retrying on
// optimistic-concurrency conflicts. The first attempt uses the (possibly
// stale) informer state; retries re-fetch the resource from the API.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1alpha1.Run, desired *v1alpha1.Run) error {
	// Work on a private copy so the caller's (informer-owned) object is
	// never mutated.
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) error {
		var err error
		// On retries, refresh our view of the resource from the API server;
		// the first attempt trusts the injectionInformer's cache.
		if attempts > 0 {
			existing, err = r.Client.TektonV1alpha1().Runs(desired.Namespace).Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// Nothing changed, so skip the write entirely.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute the (potentially expensive) diff when debug logging is enabled.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, diffErr := kmp.SafeDiff(existing.Status, desired.Status); diffErr == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status
		_, err = r.Client.TektonV1alpha1().Runs(existing.Namespace).UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// It only adds or removes the single finalizer named r.finalizerName; all
// other finalizers on the resource are left untouched.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.Run, desiredFinalizers sets.Set[string]) (*v1alpha1.Run, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer. Note: appending preserves the existing slice
		// order, unlike the removal path below which produces a sorted list.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Patch only metadata.finalizers; including resourceVersion makes the
	// merge patch fail (conflict) if the resource changed underneath us.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1alpha1().Runs(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		// Surface the failure as a Kubernetes event as well as the returned error.
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	// NOTE(review): on patch error, `updated` is returned (typically nil) —
	// callers are expected to check the error before using the result.
	return updated, err
}
// setFinalizerIfFinalizer ensures our finalizer is present on a live resource
// when the reconciler implements Finalizer; otherwise it is a no-op.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.Run) (*v1alpha1.Run, error) {
	// Only reconcilers that implement Finalizer manage a finalizer at all.
	_, isFinalizer := r.reconciler.(Finalizer)
	if !isFinalizer {
		return resource, nil
	}

	// Desired set: everything currently on the resource, plus our finalizer
	// while the resource is not being deleted.
	desired := sets.New[string](resource.Finalizers...)
	if resource.GetDeletionTimestamp().IsZero() {
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes our finalizer from a resource that is being deleted,
// but only when finalization completed cleanly (nil event or a Normal
// ReconcilerEvent). Otherwise the finalizer is left so finalization retries.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.Run, reconcileEvent reconciler.Event) (*v1alpha1.Run, error) {
	// No Finalizer implementation means there is nothing for us to clear.
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}
	// The finalizer is only removed once the resource is actually being deleted.
	if resource.GetDeletionTimestamp().IsZero() {
		return resource, nil
	}

	// Finalization is "clean" when there was no event at all, or the event is
	// a typed ReconcilerEvent of Normal severity.
	clean := reconcileEvent == nil
	if !clean {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == v1.EventTypeNormal {
			clean = true
		}
	}

	desired := sets.New[string](resource.Finalizers...)
	if clean {
		desired.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package run
import (
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// It caches the parsed queue key and the leadership / capability checks so
// Reconcile only computes them once.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the Interface implementation this run dispatches to.
	reconciler Interface
	// roi is the ReadOnlyInterface cast of the reconciler; only valid when
	// isROI is true.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) reports whether the reconciler can observe
	// resources it does not lead.
	isROI bool
	// isLeader reports whether this instance of the reconciler is the elected
	// leader for the resource identified by key.
	isLeader bool
}
// newState parses the queue key and captures, for this single reconciliation,
// whether we lead the resource and whether the reconciler is read-only capable.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Split the "namespace/name" queue key into its parts.
	ns, n, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	readOnly, ok := r.reconciler.(ReadOnlyInterface)

	return &state{
		key:        key,
		namespace:  ns,
		name:       n,
		reconciler: r.reconciler,
		roi:        readOnly,
		isROI:      ok,
		isLeader:   r.IsLeaderFor(types.NamespacedName{Namespace: ns, Name: n}),
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler, in its current
// state, has no work it is allowed to do: it is neither the elected leader
// for the resource nor a read-only observer.
func (s *state) isNotLeaderNorObserver() bool {
	// Not the leader and no ReadOnly interface: fast-path out.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to o in the
// current state, returning its name (for logging) and the function to invoke.
// Returns ("unknown", nil) when no method is applicable.
func (s *state) reconcileMethodFor(o *v1alpha1.Run) (string, doReconcile) {
	// Resources marked for deletion are finalized (leader only); live
	// resources are reconciled by the leader or observed by non-leaders.
	if !o.GetDeletionTimestamp().IsZero() {
		if fin, ok := s.reconciler.(Finalizer); ok && s.isLeader {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
		return "unknown", nil
	}
	if s.isLeader {
		return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
	}
	if s.isROI {
		return reconciler.DoObserveKind, s.roi.ObserveKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
stepaction "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/stepaction"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName identifies this controller as the source
	// component of the Kubernetes events it records.
	defaultControllerAgentName = "stepaction-controller"
	// defaultFinalizerName is the finalizer managed on StepAction resources
	// unless overridden via controller options.
	defaultFinalizerName = "stepactions.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	stepactionInformer := stepaction.Get(ctx)

	lister := stepactionInformer.Lister()

	// These two are captured by the PromoteFunc closure below but assigned
	// later from the options loop, so user-supplied overrides take effect
	// the next time this instance is promoted to leader.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				// Re-enqueue every (optionally filtered) StepAction known to
				// the lister so the new leader reprocesses the full set.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue name and logger tags from the reconciler's
	// concrete type (e.g. "pkg.reconciler.stepaction.Reconciler").
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.StepAction"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder already attached to ctx, or
// builds a fresh one backed by a new broadcaster when none is present.
// Watches started by the broadcaster are stopped when ctx is cancelled.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	if er := controller.GetEventRecorder(ctx); er != nil {
		return er
	}

	// No recorder in context: wire up a broadcaster that both logs events
	// and records them to the Kubernetes API.
	logger.Debug("Creating event broadcaster")
	broadcaster := record.NewBroadcaster()
	watches := []watch.Interface{
		broadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
		broadcaster.StartRecordingToSink(
			&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
	}

	// Tear the watches down once the context is done.
	go func() {
		<-ctx.Done()
		for _, w := range watches {
			w.Stop()
		}
	}()

	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
}
// init registers the Tekton clientset's types with the global client-go
// scheme so the event recorder can serialize StepAction objects.
func init() {
	// The error was previously discarded silently. A registration failure
	// indicates a programmer error (e.g. conflicting type registration),
	// so fail fast instead of continuing with a scheme that cannot encode
	// our types.
	if err := versionedscheme.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
context "context"
json "encoding/json"
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
zap "go.uber.org/zap"
v1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.StepAction.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1alpha1.StepAction. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1alpha1.StepAction) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1alpha1.StepAction.
// Optional: implement it only if cleanup work is needed before deletion.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1alpha1.StepAction. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1alpha1.StepAction) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.StepAction if they want to process resources for which
// they are not the leader.
// Optional: only consulted when this instance is not the leader for a key.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1alpha1.StepAction.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1alpha1.StepAction) reconciler.Event
}
// doReconcile is the common signature of ReconcileKind, ObserveKind and
// FinalizeKind; Reconcile dispatches to whichever applies to the resource.
type doReconcile func(ctx context.Context, o *v1alpha1.StepAction) reconciler.Event

// reconcilerImpl implements controller.Reconciler for v1alpha1.StepAction resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1alpha1.StepActionLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1alpha1.StepAction
// wired to the given clientset, lister, and event recorder, delegating the
// business logic to r. At most one options struct may be supplied.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1alpha1.StepActionLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Only zero or one options structs are accepted.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	// On promotion, re-enqueue every StepAction known to the lister so the
	// new leader reprocesses the full set.
	promote := func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
		all, err := lister.List(labels.Everything())
		if err != nil {
			return err
		}
		for _, elt := range all {
			// TODO: Consider letting users specify a filter in options.
			enq(bkt, types.NamespacedName{Namespace: elt.GetNamespace(), Name: elt.GetName()})
		}
		return nil
	}

	impl := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{PromoteFunc: promote},
		Client:           client,
		Lister:           lister,
		Recorder:         recorder,
		reconciler:       r,
		finalizerName:    defaultFinalizerName,
	}

	// Apply any optional overrides from the options struct.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			impl.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			impl.finalizerName = opts.FinalizerName
		}
		if opts.DemoteFunc != nil {
			impl.DemoteFunc = opts.DemoteFunc
		}
	}

	return impl
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error is the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An invalid key can never become valid, so do not requeue it.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.StepActions(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// For an "unknown" method name, no case above ran: reconcileEvent stays
	// nil and we fall through to a successful no-op below.

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.StepAction, desiredFinalizers sets.Set[string]) (*v1alpha1.StepAction, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Patch only metadata.finalizers; including the resourceVersion makes
	// the patch fail if the object changed concurrently (optimistic lock).
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1alpha1().StepActions(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	// NOTE: on patch failure the returned object may be nil/empty; callers
	// must check err before using it.
	return updated, err
}
// setFinalizerIfFinalizer ensures our finalizer is present on resource when
// the underlying reconciler implements Finalizer and the resource is not
// being deleted. It is a no-op for non-finalizing reconcilers.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.StepAction) (*v1alpha1.StepAction, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		// Nothing to do: the reconciler does not finalize this kind.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	if resource.GetDeletionTimestamp().IsZero() {
		// Live resource: make sure our finalizer is tracked on it.
		desired.Insert(r.finalizerName)
	}

	// Synchronize only our own finalizer against the stored object.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes our finalizer from resource once finalization has
// completed cleanly (nil or Normal event) on a resource that is being
// deleted. Non-finalizing reconcilers and live resources are left untouched.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.StepAction, reconcileEvent reconciler.Event) (*v1alpha1.StepAction, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; keep the finalizer in place.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	if reconcileEvent == nil {
		// Clean finalization: drop our finalizer.
		desired.Delete(r.finalizerName)
	} else {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == v1.EventTypeNormal {
			// A Normal event also counts as successful finalization.
			desired.Delete(r.finalizerName)
		}
	}

	// Synchronize only our own finalizer against the stored object.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	// Only meaningful when isROI is true.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader.
	isLeader bool
}
// newState derives per-run reconciler state from the queue key: it splits
// the key into namespace and name, captures the optional read-only
// interface, and records whether this instance currently leads the bucket
// covering the key.
func newState(key string, r *reconcilerImpl) (*state, error) {
	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	s := &state{
		key:        key,
		namespace:  ns,
		name:       name,
		reconciler: r.reconciler,
	}
	// Capture the read-only observer, if the reconciler provides one.
	s.roi, s.isROI = r.reconciler.(ReadOnlyInterface)
	// Leadership is decided per namespaced name.
	s.isLeader = r.IsLeaderFor(types.NamespacedName{Namespace: ns, Name: name})
	return s, nil
}
// isNotLeaderNorObserver reports whether no work is possible for this
// reconciler run: we neither lead the key's bucket nor implement the
// read-only observer interface.
func (s *state) isNotLeaderNorObserver() bool {
	// Either role (leader or read-only observer) is enough to proceed.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to o,
// returning the method's name (for logging) together with the function
// itself. A nil function with name "unknown" means no work is possible in
// the current state (e.g. a non-leader without observer support).
func (s *state) reconcileMethodFor(o *v1alpha1.StepAction) (string, doReconcile) {
	deleting := !o.GetDeletionTimestamp().IsZero()
	switch {
	case !deleting && s.isLeader:
		// Live resource and we lead its bucket: full reconciliation.
		return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
	case !deleting && s.isROI:
		// Live resource but not the leader: read-only observation.
		return reconciler.DoObserveKind, s.roi.ObserveKind
	case deleting && s.isLeader:
		// Resource is being deleted; finalize only if supported.
		if fin, ok := s.reconciler.(Finalizer); ok {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package verificationpolicy
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
verificationpolicy "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName identifies this controller as the source
	// component of the Kubernetes events it records.
	defaultControllerAgentName = "verificationpolicy-controller"
	// defaultFinalizerName is the finalizer managed on VerificationPolicy
	// resources unless overridden via controller options.
	defaultFinalizerName = "verificationpolicies.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	verificationpolicyInformer := verificationpolicy.Get(ctx)

	lister := verificationpolicyInformer.Lister()

	// These two are captured by the PromoteFunc closure below but assigned
	// later from the options loop, so user-supplied overrides take effect
	// the next time this instance is promoted to leader.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				// Re-enqueue every (optionally filtered) VerificationPolicy
				// known to the lister so the new leader reprocesses the set.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue name and logger tags from the reconciler's
	// concrete type.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.VerificationPolicy"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder already attached to ctx, or
// builds a fresh one backed by a new broadcaster when none is present.
// Watches started by the broadcaster are stopped when ctx is cancelled.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	if er := controller.GetEventRecorder(ctx); er != nil {
		return er
	}

	// No recorder in context: wire up a broadcaster that both logs events
	// and records them to the Kubernetes API.
	logger.Debug("Creating event broadcaster")
	broadcaster := record.NewBroadcaster()
	watches := []watch.Interface{
		broadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
		broadcaster.StartRecordingToSink(
			&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
	}

	// Tear the watches down once the context is done.
	go func() {
		<-ctx.Done()
		for _, w := range watches {
			w.Stop()
		}
	}()

	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
}
// init registers the Tekton clientset's types with the global client-go
// scheme so the event recorder can serialize VerificationPolicy objects.
func init() {
	// The error was previously discarded silently. A registration failure
	// indicates a programmer error (e.g. conflicting type registration),
	// so fail fast instead of continuing with a scheme that cannot encode
	// our types.
	if err := versionedscheme.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package verificationpolicy
import (
context "context"
json "encoding/json"
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
zap "go.uber.org/zap"
v1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.VerificationPolicy.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1alpha1.VerificationPolicy. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1alpha1.VerificationPolicy.
// Optional: implement it only if cleanup work is needed before deletion.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1alpha1.VerificationPolicy. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.VerificationPolicy if they want to process resources for which
// they are not the leader.
// Optional: only consulted when this instance is not the leader for a key.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1alpha1.VerificationPolicy.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event
}
// doReconcile is the common signature of ReconcileKind, ObserveKind and
// FinalizeKind; Reconcile dispatches to whichever applies to the resource.
type doReconcile func(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event

// reconcilerImpl implements controller.Reconciler for v1alpha1.VerificationPolicy resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1alpha1.VerificationPolicyLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for
// v1alpha1.VerificationPolicy wired to the given clientset, lister, and
// event recorder, delegating the business logic to r. At most one options
// struct may be supplied.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1alpha1.VerificationPolicyLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Only zero or one options structs are accepted.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	// On promotion, re-enqueue every VerificationPolicy known to the lister
	// so the new leader reprocesses the full set.
	promote := func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
		all, err := lister.List(labels.Everything())
		if err != nil {
			return err
		}
		for _, elt := range all {
			// TODO: Consider letting users specify a filter in options.
			enq(bkt, types.NamespacedName{Namespace: elt.GetNamespace(), Name: elt.GetName()})
		}
		return nil
	}

	impl := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{PromoteFunc: promote},
		Client:           client,
		Lister:           lister,
		Recorder:         recorder,
		reconciler:       r,
		finalizerName:    defaultFinalizerName,
	}

	// Apply any optional overrides from the options struct.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			impl.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			impl.finalizerName = opts.FinalizerName
		}
		if opts.DemoteFunc != nil {
			impl.DemoteFunc = opts.DemoteFunc
		}
	}

	return impl
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error is the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An invalid key can never become valid, so do not requeue it.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.VerificationPolicies(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// For an "unknown" method name, no case above ran: reconcileEvent stays
	// nil and we fall through to a successful no-op below.

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
// It is a no-op (returning the input resource) when the desired and existing
// membership of r.finalizerName already agree; otherwise it issues a merge
// patch against metadata.finalizers and emits an event describing the outcome.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.VerificationPolicy, desiredFinalizers sets.Set[string]) (*v1alpha1.VerificationPolicy, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer. Appending (rather than rebuilding from the set)
		// preserves the order of any other finalizers already present.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Patch only metadata.finalizers. Including resourceVersion makes the
	// patch conditional on the object not having changed since we read it
	// (optimistic concurrency).
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1alpha1().VerificationPolicies(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	// NOTE(review): on patch failure `updated` is returned alongside the error;
	// callers must not use the resource value when err != nil.
	return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on the resource
// when the underlying reconciler implements Finalizer and the resource is not
// being deleted. For reconcilers without finalization logic it returns the
// resource unchanged.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.VerificationPolicy) (*v1alpha1.VerificationPolicy, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		// The reconciler does not finalize; nothing to set.
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	// If this resource is not being deleted, mark the finalizer.
	if resource.GetDeletionTimestamp().IsZero() {
		finalizers.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
// clearFinalizer removes r.finalizerName from a resource that is being
// deleted, but only when finalization succeeded: a nil reconcileEvent, or an
// event of Normal type. Any other outcome leaves the finalizer in place so
// that deletion is retried on a subsequent reconcile.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.VerificationPolicy, reconcileEvent reconciler.Event) (*v1alpha1.VerificationPolicy, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		// The reconciler does not finalize; nothing to clear.
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; keep the finalizer as-is.
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			// A Normal-type event signals clean finalization.
			if event.EventType == v1.EventTypeNormal {
				finalizers.Delete(r.finalizerName)
			}
		}
	} else {
		// nil event: finalization completed without error.
		finalizers.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package verificationpolicy
import (
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// It is computed once per Reconcile call by newState and is read-only
// thereafter.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	// Only valid when isROI is true.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader.
	isLeader bool
}
// newState builds the per-run reconciliation state for the given work-queue
// key: it splits the key into namespace and name, detects whether the
// reconciler supports read-only observation, and records whether this replica
// is the elected leader for the key. Returns an error for a malformed key.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	roi, isROI := r.reconciler.(ReadOnlyInterface)

	// Leadership is decided per namespaced name (bucketed by the embedded
	// LeaderAwareFuncs), not globally per controller.
	isLeader := r.IsLeaderFor(types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	})

	return &state{
		key:        key,
		namespace:  namespace,
		name:       name,
		reconciler: r.reconciler,
		roi:        roi,
		isROI:      isROI,
		isLeader:   isLeader,
	}, nil
}
// isNotLeaderNorObserver checks to see if this reconciler with the current
// state is enabled to do any work or not.
// isNotLeaderNorObserver returns true when there is no work possible for the
// reconciler: this replica is not the leader for the key AND the reconciler
// does not implement ReadOnlyInterface, so it can neither write nor observe.
func (s *state) isNotLeaderNorObserver() bool {
	if !s.isLeader && !s.isROI {
		// If we are not the leader, and we don't implement the ReadOnly
		// interface, then take a fast-path out.
		return true
	}
	return false
}
// reconcileMethodFor selects the reconciliation method to invoke for o based
// on its deletion state and this replica's role: ReconcileKind for the leader
// on a live resource, ObserveKind for a non-leader read-only observer, and
// FinalizeKind for the leader on a resource being deleted when the reconciler
// implements Finalizer. Returns ("unknown", nil) when nothing applies and the
// caller should take no action.
func (s *state) reconcileMethodFor(o *v1alpha1.VerificationPolicy) (string, doReconcile) {
	if o.GetDeletionTimestamp().IsZero() {
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		} else if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
		return reconciler.DoFinalizeKind, fin.FinalizeKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package customrun
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
customrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the event-source component name used
	// unless overridden via ControllerOptions.AgentName.
	defaultControllerAgentName = "customrun-controller"
	// defaultFinalizerName is the finalizer managed on CustomRun resources
	// unless overridden via ControllerOptions.FinalizerName.
	defaultFinalizerName = "customruns.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	customrunInformer := customrun.Get(ctx)
	lister := customrunInformer.Lister()

	// These two may be replaced below by the options function; the defaults
	// are "no filter" and a no-op promotion callback.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion to leader for a bucket, re-enqueue every CustomRun
			// (optionally filtered) so the new leader reconciles all of them.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue and logger name from the concrete reconciler
	// type's package path and name (slashes replaced by dots).
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.CustomRun"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	// The recorder is created after the options loop so that an AgentName
	// override takes effect.
	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder already attached to ctx if one
// exists; otherwise it creates a broadcaster-backed recorder that both logs
// events and writes them to the Kubernetes API, stopping its watches when the
// context is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		go func() {
			// Shut down the broadcaster watches cleanly on context
			// cancellation; this goroutine's lifetime is bound to ctx.
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}

	return recorder
}
func init() {
	// Register the Tekton API types with the global client-go scheme so the
	// event recorder can resolve their group/version/kind.
	// NOTE(review): the returned error is ignored, as is conventional in this
	// generated code; registration is assumed not to fail.
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package customrun
import (
context "context"
json "encoding/json"
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
v1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.CustomRun. It is invoked by
// reconcilerImpl.Reconcile only when this replica leads the resource's key.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1beta1.CustomRun. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1beta1.CustomRun) reconciler.Event
}
// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1beta1.CustomRun. Implementing it causes the
// generated reconciler to manage a finalizer on the resource automatically.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1beta1.CustomRun. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1beta1.CustomRun) reconciler.Event
}
// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.CustomRun if they want to process resources for which
// they are not the leader. Non-leader replicas dispatch here instead of
// ReconcileKind.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1beta1.CustomRun.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1beta1.CustomRun) reconciler.Event
}
// doReconcile is the common signature shared by ReconcileKind, ObserveKind and
// FinalizeKind, allowing reconcileMethodFor to return any of them uniformly.
type doReconcile func(ctx context.Context, o *v1beta1.CustomRun) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1beta1.CustomRun resources.
// It wraps a user-supplied Interface with informer-cache reads, finalizer
// management, status synchronization, and event recording.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1beta1.CustomRunLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	// Defaults to defaultFinalizerName; override via Options.FinalizerName.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}
// Compile-time interface conformance checks (no runtime cost).

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler creates a reconcilerImpl for v1beta1.CustomRun wired to the
// given client, lister, event recorder and business-logic reconciler r. At
// most one controller.Options may be supplied to override the config store,
// finalizer name, status-update behavior, or demotion callback.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.CustomRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion to leader for a bucket, re-enqueue every CustomRun
			// so the new leader reconciles all of them.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply any optional overrides (at most one options struct, checked above).
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler
//
// One pass over the keyed CustomRun: load it from the informer cache,
// dispatch to ReconcileKind / ObserveKind / FinalizeKind as appropriate for
// deletion state and leadership, synchronize status, and surface any
// reconciler.Event through the event recorder.
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An invalid key can never become valid, so don't requeue it.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.

	getter := r.Lister.CustomRuns(s.namespace)

	original, err := getter.Get(s.name)

	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		if !r.skipStatusUpdates {
			reconciler.PreProcessReconcile(ctx, resource)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

		if !r.skipStatusUpdates {
			reconciler.PostProcessReconcile(ctx, resource, original)
		}

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource is being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)

	}

	// Synchronize the status. The first matching case wins.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		// Returning the error causes the key to be requeued per the
		// controller's error-handling policy.
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status to the API server, retrying on
// optimistic-concurrency conflicts. The first attempt reuses the (possibly
// stale) informer copy; each retry re-fetches the latest object before
// applying the desired status on top of it.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1beta1.CustomRun, desired *v1beta1.CustomRun) error {
	// Copy so retries never mutate the caller's (informer-owned) object.
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			getter := r.Client.TektonV1beta1().CustomRuns(desired.Namespace)

			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute the (potentially expensive) diff when debug logging
		// is actually enabled.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status

		updater := r.Client.TektonV1beta1().CustomRuns(existing.Namespace)
		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
// It is a no-op (returning the input resource) when the desired and existing
// membership of r.finalizerName already agree; otherwise it issues a merge
// patch against metadata.finalizers and emits an event describing the outcome.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.CustomRun, desiredFinalizers sets.Set[string]) (*v1beta1.CustomRun, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer. Appending (rather than rebuilding from the set)
		// preserves the order of any other finalizers already present.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Patch only metadata.finalizers. Including resourceVersion makes the
	// patch conditional on the object not having changed since we read it
	// (optimistic concurrency).
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1beta1().CustomRuns(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	// NOTE(review): on patch failure `updated` is returned alongside the error;
	// callers must not use the resource value when err != nil.
	return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on the resource
// when the underlying reconciler implements Finalizer and the resource is not
// being deleted. For reconcilers without finalization logic it returns the
// resource unchanged.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.CustomRun) (*v1beta1.CustomRun, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		// The reconciler does not finalize; nothing to set.
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	// If this resource is not being deleted, mark the finalizer.
	if resource.GetDeletionTimestamp().IsZero() {
		finalizers.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
// clearFinalizer removes r.finalizerName from a resource that is being
// deleted, but only when finalization succeeded: a nil reconcileEvent, or an
// event of Normal type. Any other outcome leaves the finalizer in place so
// that deletion is retried on a subsequent reconcile.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.CustomRun, reconcileEvent reconciler.Event) (*v1beta1.CustomRun, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		// The reconciler does not finalize; nothing to clear.
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; keep the finalizer as-is.
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			// A Normal-type event signals clean finalization.
			if event.EventType == v1.EventTypeNormal {
				finalizers.Delete(r.finalizerName)
			}
		}
	} else {
		// nil event: finalization completed without error.
		finalizers.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package customrun
import (
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// It is computed once per Reconcile call by newState and is read-only
// thereafter.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	// Only valid when isROI is true.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader.
	isLeader bool
}
// newState builds the per-run reconciliation state for the given work-queue
// key: it splits the key into namespace and name, detects whether the
// reconciler supports read-only observation, and records whether this replica
// is the elected leader for the key. Returns an error for a malformed key.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	roi, isROI := r.reconciler.(ReadOnlyInterface)

	// Leadership is decided per namespaced name (bucketed by the embedded
	// LeaderAwareFuncs), not globally per controller.
	isLeader := r.IsLeaderFor(types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	})

	return &state{
		key:        key,
		namespace:  namespace,
		name:       name,
		reconciler: r.reconciler,
		roi:        roi,
		isROI:      isROI,
		isLeader:   isLeader,
	}, nil
}
// isNotLeaderNorObserver checks to see if this reconciler with the current
// state is enabled to do any work or not.
// isNotLeaderNorObserver returns true when there is no work possible for the
// reconciler: this replica is not the leader for the key AND the reconciler
// does not implement ReadOnlyInterface, so it can neither write nor observe.
func (s *state) isNotLeaderNorObserver() bool {
	if !s.isLeader && !s.isROI {
		// If we are not the leader, and we don't implement the ReadOnly
		// interface, then take a fast-path out.
		return true
	}
	return false
}
// reconcileMethodFor selects the reconciliation method to invoke for o based
// on its deletion state and this replica's role: ReconcileKind for the leader
// on a live resource, ObserveKind for a non-leader read-only observer, and
// FinalizeKind for the leader on a resource being deleted when the reconciler
// implements Finalizer. Returns ("unknown", nil) when nothing applies and the
// caller should take no action.
func (s *state) reconcileMethodFor(o *v1beta1.CustomRun) (string, doReconcile) {
	if o.GetDeletionTimestamp().IsZero() {
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		} else if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
		return reconciler.DoFinalizeKind, fin.FinalizeKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
pipeline "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipeline"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the event-source component name used
	// unless overridden via ControllerOptions.AgentName.
	defaultControllerAgentName = "pipeline-controller"
	// defaultFinalizerName is the finalizer managed on Pipeline resources
	// unless overridden via ControllerOptions.FinalizerName.
	defaultFinalizerName = "pipelines.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	pipelineInformer := pipeline.Get(ctx)
	lister := pipelineInformer.Lister()

	// These two may be replaced below by the options function; the defaults
	// are "no filter" and a no-op promotion callback.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion to leader for a bucket, re-enqueue every Pipeline
			// (optionally filtered) so the new leader reconciles all of them.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue and logger name from the concrete reconciler
	// type's package path and name (slashes replaced by dots).
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.Pipeline"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	// NOTE(review): unlike the CustomRun variant of NewImpl, this loop does
	// not handle opts.SkipStatusUpdates — presumably intentional generator
	// output for this kind; confirm against the genreconciler flags.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	// The recorder is created after the options loop so that an AgentName
	// override takes effect.
	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder already stashed on ctx, or builds a
// fresh broadcaster-backed recorder whose watches are stopped when ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	if recorder := controller.GetEventRecorder(ctx); recorder != nil {
		return recorder
	}

	// No recorder on the context: wire up an event broadcaster that both logs
	// events and records them to the Kubernetes API via the injected client.
	logger.Debug("Creating event broadcaster")
	broadcaster := record.NewBroadcaster()
	watches := []watch.Interface{
		broadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
		broadcaster.StartRecordingToSink(
			&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
	}

	// Tear the watches down once the surrounding context is cancelled.
	go func() {
		<-ctx.Done()
		for _, w := range watches {
			w.Stop()
		}
	}()

	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
}
func init() {
	// Register the Tekton API types into the shared client-go scheme so the
	// event recorder can resolve object references for emitted Events.
	// NOTE(review): the error return is discarded here — presumably AddToScheme
	// cannot fail for these types; confirm against the generated scheme package.
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
context "context"
json "encoding/json"
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
zap "go.uber.org/zap"
v1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.Pipeline. Implementations are invoked by the
// generated reconcilerImpl.Reconcile in this package.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1beta1.Pipeline. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1beta1.Pipeline) reconciler.Event
}
// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1beta1.Pipeline. Implementing it causes the generated
// reconciler to manage the package finalizer on the resource.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1beta1.Pipeline. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1beta1.Pipeline) reconciler.Event
}
// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.Pipeline if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1beta1.Pipeline.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1beta1.Pipeline) reconciler.Event
}
// doReconcile is the common signature of ReconcileKind, FinalizeKind and
// ObserveKind; state.reconcileMethodFor selects which one to invoke.
type doReconcile func(ctx context.Context, o *v1beta1.Pipeline) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1beta1.Pipeline resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1beta1.PipelineLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	// Defaults to defaultFinalizerName; may be overridden via options.
	finalizerName string
}
// Compile-time interface conformance checks.

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1beta1.Pipeline that
// delegates business logic to r. Unlike NewImpl, the client, lister and
// recorder are supplied explicitly. At most one controller.Options may
// override the config store, finalizer name, or demotion hook.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.PipelineLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion, enqueue every Pipeline so the new leader picks up
			// the complete working set.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply any optional overrides.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error is the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		// An invalid key is not retryable: log and drop it rather than requeue.
		logger.Error("Invalid resource key: ", key)
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.Pipelines(s.namespace)

	original, err := getter.Get(s.name)

	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			// A genuine error: log it, emit a Warning event on the resource,
			// and return it so the key is requeued.
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.Pipeline, desiredFinalizers sets.Set[string]) (*v1beta1.Pipeline, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer. Appending to the original list (not the set)
		// preserves the relative order of any other finalizers present.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Include resourceVersion in the merge patch so the write fails with a
	// conflict if the resource changed since it was read from the informer.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1beta1().Pipelines(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on resource when
// the underlying reconciler implements Finalizer and the resource is not being
// deleted. It is a no-op for non-finalizing reconcilers.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.Pipeline) (*v1beta1.Pipeline, error) {
	_, isFinalizer := r.reconciler.(Finalizer)
	if !isFinalizer {
		// Nothing to manage for reconcilers without finalization logic.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	// Only mark the finalizer while the resource is live; once the deletion
	// timestamp is set, clearFinalizer is responsible for removal.
	if resource.GetDeletionTimestamp().IsZero() {
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes r.finalizerName from a resource that is being
// deleted, but only when finalization completed cleanly: a nil event, or a
// wrapped ReconcilerEvent of type Normal. Any other outcome keeps the
// finalizer so the key is retried. No-op for non-finalizing reconcilers or
// resources without a deletion timestamp.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.Pipeline, reconcileEvent reconciler.Event) (*v1beta1.Pipeline, error) {
	if _, isFinalizer := r.reconciler.(Finalizer); !isFinalizer {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; leave finalizers untouched.
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	// Decide whether finalization succeeded.
	clean := reconcileEvent == nil
	if !clean {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == v1.EventTypeNormal {
			clean = true
		}
	}
	if clean {
		finalizers.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipeline
import (
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
// It is constructed by newState at the top of every Reconcile call.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	// Only valid when isROI is true.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader.
	isLeader bool
}
// newState splits key into namespace and name, records whether this replica
// leads for that key, and captures the optional read-only interface cast of
// the reconciler.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	s := &state{
		key:        key,
		namespace:  namespace,
		name:       name,
		reconciler: r.reconciler,
	}
	s.roi, s.isROI = r.reconciler.(ReadOnlyInterface)
	s.isLeader = r.IsLeaderFor(types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	})
	return s, nil
}
// isNotLeaderNorObserver reports whether this reconciler has no possible work
// for the current key: it is neither the elected leader nor capable of
// read-only observation.
func (s *state) isNotLeaderNorObserver() bool {
	// Work is possible only when leading or observing; anything else is a
	// fast-path exit for the caller.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects the typed callback to run for o based on its
// deletion state, this replica's leadership, and the optional interfaces the
// reconciler implements. It returns the method name (for logging) plus the
// function to invoke, or ("unknown", nil) when nothing applies.
func (s *state) reconcileMethodFor(o *v1beta1.Pipeline) (string, doReconcile) {
	if !o.GetDeletionTimestamp().IsZero() {
		// Finalization is leader-only and requires the Finalizer interface.
		if fin, ok := s.reconciler.(Finalizer); ok && s.isLeader {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
		return "unknown", nil
	}

	switch {
	case s.isLeader:
		return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
	case s.isROI:
		// Non-leaders may still observe when the reconciler supports it.
		return reconciler.DoObserveKind, s.roi.ObserveKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
// Defaults used by NewImpl/NewReconciler unless overridden via controller options.
const (
	// defaultControllerAgentName is the component name attached to Kubernetes
	// Events emitted by this controller (see createRecorder).
	defaultControllerAgentName = "pipelinerun-controller"
	// defaultFinalizerName is the finalizer managed on v1beta1.PipelineRun
	// resources when the reconciler implements the Finalizer interface.
	defaultFinalizerName = "pipelineruns.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	pipelinerunInformer := pipelinerun.Get(ctx)

	lister := pipelinerunInformer.Lister()

	// These two are captured by the PromoteFunc closure below and only replaced
	// later, after the options function runs — so caller-supplied promotion
	// hooks take effect even though the closure is constructed first.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				// On promotion, enqueue every PipelineRun (optionally filtered)
				// so the new leader reconciles the set it is now responsible for.
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue name from the concrete reconciler type, replacing
	// slashes so the package path forms a single dotted identifier.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.PipelineRun"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	// The recorder is created last so an options-supplied AgentName is honored.
	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder already stashed on ctx, or builds a
// fresh broadcaster-backed recorder whose watches are stopped when ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	if recorder := controller.GetEventRecorder(ctx); recorder != nil {
		return recorder
	}

	// No recorder on the context: wire up an event broadcaster that both logs
	// events and records them to the Kubernetes API via the injected client.
	logger.Debug("Creating event broadcaster")
	broadcaster := record.NewBroadcaster()
	watches := []watch.Interface{
		broadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
		broadcaster.StartRecordingToSink(
			&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
	}

	// Tear the watches down once the surrounding context is cancelled.
	go func() {
		<-ctx.Done()
		for _, w := range watches {
			w.Stop()
		}
	}()

	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
}
func init() {
	// Register the Tekton API types into the shared client-go scheme so the
	// event recorder can resolve object references for emitted Events.
	// NOTE(review): the error return is discarded here — presumably AddToScheme
	// cannot fail for these types; confirm against the generated scheme package.
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
context "context"
json "encoding/json"
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
v1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.PipelineRun. Implementations are invoked by
// the generated reconcilerImpl.Reconcile in this package.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1beta1.PipelineRun. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event
}
// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1beta1.PipelineRun. Implementing it causes the
// generated reconciler to manage the package finalizer on the resource.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1beta1.PipelineRun. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event
}
// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.PipelineRun if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1beta1.PipelineRun.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event
}
// doReconcile is the common signature of ReconcileKind, FinalizeKind and
// ObserveKind; state.reconcileMethodFor selects which one to invoke.
type doReconcile func(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1beta1.PipelineRun resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1beta1.PipelineRunLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	// Defaults to defaultFinalizerName; may be overridden via options.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}
// Compile-time interface conformance checks.

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1beta1.PipelineRun
// that delegates business logic to r. Unlike NewImpl, the client, lister and
// recorder are supplied explicitly. At most one controller.Options may
// override the config store, finalizer name, status-update behavior, or
// demotion hook.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.PipelineRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion, enqueue every PipelineRun so the new leader picks
			// up the complete working set.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply any optional overrides.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error is the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		// An invalid key is not retryable: log and drop it rather than requeue.
		logger.Error("Invalid resource key: ", key)
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.PipelineRuns(s.namespace)

	original, err := getter.Get(s.name)

	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status. Note this happens even when the reconcile
	// callback returned an event/error, so partial progress is persisted.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			// A genuine error: log it, emit a Warning event on the resource,
			// and return it so the key is requeued.
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status back to the API server, retrying on
// optimistic-concurrency conflicts. On the first attempt it uses the
// informer-supplied copy of the resource; on retries it re-fetches the latest
// object so the status is applied on top of current state.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1beta1.PipelineRun, desired *v1beta1.PipelineRun) error {
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			getter := r.Client.TektonV1beta1().PipelineRuns(desired.Namespace)

			// Note: this reassigns the captured `existing` so the equality
			// check and update below operate on the freshly fetched object.
			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute and log the status diff when debug logging is enabled.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status

		updater := r.Client.TektonV1beta1().PipelineRuns(existing.Namespace)

		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
//
// It applies a JSON merge patch that touches only metadata.finalizers (and
// metadata.resourceVersion, so the patch is rejected if the object changed
// since it was read). On success/failure a corresponding Kubernetes event is
// recorded. Returns the patched resource, or the original on a no-op.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.PipelineRun, desiredFinalizers sets.Set[string]) (*v1beta1.PipelineRun, error) {
// Don't modify the informers copy.
existing := resource.DeepCopy()
var finalizers []string
// If there's nothing to update, just return.
existingFinalizers := sets.New[string](existing.Finalizers...)
if desiredFinalizers.Has(r.finalizerName) {
if existingFinalizers.Has(r.finalizerName) {
// Nothing to do.
return resource, nil
}
// Add the finalizer.
finalizers = append(existing.Finalizers, r.finalizerName)
} else {
if !existingFinalizers.Has(r.finalizerName) {
// Nothing to do.
return resource, nil
}
// Remove the finalizer.
existingFinalizers.Delete(r.finalizerName)
finalizers = sets.List(existingFinalizers)
}
mergePatch := map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": finalizers,
"resourceVersion": existing.ResourceVersion,
},
}
patch, err := json.Marshal(mergePatch)
if err != nil {
return resource, err
}
patcher := r.Client.TektonV1beta1().PipelineRuns(resource.Namespace)
resourceName := resource.Name
updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
"Failed to update finalizers for %q: %v", resourceName, err)
} else {
r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
"Updated %q finalizers", resource.GetName())
}
return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is tracked on the resource
// when the underlying reconciler implements Finalizer and the resource is not
// being deleted. For non-finalizing reconcilers it is a no-op.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) {
	if _, finalizes := r.reconciler.(Finalizer); !finalizes {
		// No finalization logic; leave the resource untouched.
		return resource, nil
	}
	desired := sets.New[string](resource.Finalizers...)
	if resource.GetDeletionTimestamp().IsZero() {
		// The resource is live, so our finalizer must be present.
		desired.Insert(r.finalizerName)
	}
	// Write back only the finalizer this reconciler owns.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer drops r.finalizerName from a resource that is being deleted,
// but only when finalization succeeded: a nil event, or a Normal-type
// reconciler.ReconcilerEvent. Any other outcome keeps the finalizer in place.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.PipelineRun, reconcileEvent reconciler.Event) (*v1beta1.PipelineRun, error) {
	if _, finalizes := r.reconciler.(Finalizer); !finalizes {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; nothing to clear.
		return resource, nil
	}
	desired := sets.New[string](resource.Finalizers...)
	succeeded := reconcileEvent == nil
	if !succeeded {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == v1.EventTypeNormal {
			succeeded = true
		}
	}
	if succeeded {
		desired.Delete(r.finalizerName)
	}
	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelinerun
import (
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
type state struct {
// key is the original reconciliation key from the queue.
key string
// namespace is the namespace split from the reconciliation key.
namespace string
// name is the name split from the reconciliation key.
name string
// reconciler is the reconciler.
reconciler Interface
// roi is the read only interface cast of the reconciler.
roi ReadOnlyInterface
// isROI is true when the reconciler implements ReadOnlyInterface, i.e. it
// can observe reconciliation without being the leader.
isROI bool
// isLeader is true when this instance of the reconciler is the elected
// leader for the key being reconciled.
isLeader bool
}
// newState derives the per-run reconciliation state for key: it splits the
// queue key into namespace and name, probes the reconciler for the optional
// ReadOnlyInterface, and records whether this replica leads the key.
func newState(key string, r *reconcilerImpl) (*state, error) {
	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}
	readOnly, observes := r.reconciler.(ReadOnlyInterface)
	return &state{
		key:        key,
		namespace:  ns,
		name:       name,
		reconciler: r.reconciler,
		roi:        readOnly,
		isROI:      observes,
		isLeader:   r.IsLeaderFor(types.NamespacedName{Namespace: ns, Name: name}),
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler can do no work at
// all for the current key: it is neither the elected leader nor a read-only
// observer, so callers should take a fast-path out.
func (s *state) isNotLeaderNorObserver() bool {
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to o:
// ReconcileKind for live resources on the leader, ObserveKind for read-only
// replicas, FinalizeKind for deleted resources on a finalizing leader.
// It returns "unknown" with a nil func when no method applies.
func (s *state) reconcileMethodFor(o *v1beta1.PipelineRun) (string, doReconcile) {
	switch {
	case o.GetDeletionTimestamp().IsZero():
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		}
		if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	case s.isLeader:
		if fin, ok := s.reconciler.(Finalizer); ok {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
stepaction "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
// defaultControllerAgentName is the event-source component name used when
// no AgentName override is supplied via controller options.
defaultControllerAgentName = "stepaction-controller"
// defaultFinalizerName is the finalizer managed on StepAction resources
// unless overridden via controller options.
defaultFinalizerName = "stepactions.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
logger := logging.FromContext(ctx)
// Check the options function input. It should be 0 or 1.
if len(optionsFns) > 1 {
logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
}
stepactionInformer := stepaction.Get(ctx)
lister := stepactionInformer.Lister()
// promoteFilterFunc/promoteFunc are captured by the PromoteFunc closure below
// and may be replaced later by the options loop before promotion ever runs.
var promoteFilterFunc func(obj interface{}) bool
var promoteFunc = func(bkt reconciler.Bucket) {}
rec := &reconcilerImpl{
LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
// On promotion, enqueue every StepAction (optionally filtered) so the
// new leader reconciles all resources it now owns.
PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
// Signal promotion event
promoteFunc(bkt)
all, err := lister.List(labels.Everything())
if err != nil {
return err
}
for _, elt := range all {
if promoteFilterFunc != nil {
if ok := promoteFilterFunc(elt); !ok {
continue
}
}
enq(bkt, types.NamespacedName{
Namespace: elt.GetNamespace(),
Name: elt.GetName(),
})
}
return nil
},
},
Client: client.Get(ctx),
Lister: lister,
reconciler: r,
finalizerName: defaultFinalizerName,
}
// Derive the work-queue/logger name from the concrete reconciler type.
ctrType := reflect.TypeOf(r).Elem()
ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")
logger = logger.With(
zap.String(logkey.ControllerType, ctrTypeName),
zap.String(logkey.Kind, "tekton.dev.StepAction"),
)
impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
agentName := defaultControllerAgentName
// Pass impl to the options. Save any optional results.
for _, fn := range optionsFns {
opts := fn(impl)
if opts.ConfigStore != nil {
rec.configStore = opts.ConfigStore
}
if opts.FinalizerName != "" {
rec.finalizerName = opts.FinalizerName
}
if opts.AgentName != "" {
agentName = opts.AgentName
}
if opts.DemoteFunc != nil {
rec.DemoteFunc = opts.DemoteFunc
}
if opts.PromoteFilterFunc != nil {
promoteFilterFunc = opts.PromoteFilterFunc
}
if opts.PromoteFunc != nil {
promoteFunc = opts.PromoteFunc
}
}
rec.Recorder = createRecorder(ctx, agentName)
return impl
}
// createRecorder returns the event recorder already attached to ctx if one
// exists; otherwise it builds a broadcaster-backed recorder that both logs
// events and records them to the Kubernetes Events API, stopping its watches
// when ctx is cancelled.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
logger := logging.FromContext(ctx)
recorder := controller.GetEventRecorder(ctx)
if recorder == nil {
// Create event broadcaster
logger.Debug("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
watches := []watch.Interface{
eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
eventBroadcaster.StartRecordingToSink(
&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
}
recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
// Tie the lifetime of the event watches to ctx.
go func() {
<-ctx.Done()
for _, w := range watches {
w.Stop()
}
}()
}
return recorder
}
// init registers the Tekton API types into the shared client-go scheme so
// components using scheme.Scheme (e.g. the event recorder) can resolve them.
func init() {
	// AddToScheme returns an error on registration failure; the original code
	// silently discarded it. Panicking here makes a broken scheme registration
	// (a programmer/configuration bug) fail fast at process start.
	if err := versionedscheme.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
context "context"
json "encoding/json"
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
zap "go.uber.org/zap"
v1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.StepAction.
type Interface interface {
// ReconcileKind implements custom logic to reconcile v1beta1.StepAction. Any changes
// to the objects .Status or .Finalizers will be propagated to the stored
// object. It is recommended that implementors do not call any update calls
// for the Kind inside of ReconcileKind, it is the responsibility of the calling
// controller to propagate those properties. The resource passed to ReconcileKind
// will always have an empty deletion timestamp.
// ReconcileKind is only invoked on the elected leader for the resource.
ReconcileKind(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
}
// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1beta1.StepAction.
type Finalizer interface {
// FinalizeKind implements custom logic to finalize v1beta1.StepAction. Any changes
// to the objects .Status or .Finalizers will be ignored. Returning a nil or
// Normal type reconciler.Event will allow the finalizer to be deleted on
// the resource. The resource passed to FinalizeKind will always have a set
// deletion timestamp.
// FinalizeKind is only invoked on the elected leader for the resource.
FinalizeKind(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
}
// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.StepAction if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
// ObserveKind implements logic to observe v1beta1.StepAction.
// This method should not write to the API.
// ObserveKind is invoked on non-leader replicas instead of ReconcileKind.
ObserveKind(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
}
// doReconcile is the common signature of ReconcileKind, ObserveKind and
// FinalizeKind, allowing uniform dispatch by the reconciliation state machine.
type doReconcile func(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1beta1.StepAction resources.
// It wraps the user-provided Interface with leader-aware promotion, finalizer
// management and event recording.
type reconcilerImpl struct {
// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
reconciler.LeaderAwareFuncs
// Client is used to write back status updates.
Client versioned.Interface
// Listers index properties about resources.
Lister pipelinev1beta1.StepActionLister
// Recorder is an event recorder for recording Event resources to the
// Kubernetes API.
Recorder record.EventRecorder
// configStore allows for decorating a context with config maps.
// +optional
configStore reconciler.ConfigStore
// reconciler is the implementation of the business logic of the resource.
reconciler Interface
// finalizerName is the name of the finalizer to reconcile.
finalizerName string
}
// Compile-time check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)
// Compile-time check that our generated Reconciler is always LeaderAware, so
// the shared controller machinery can promote/demote it during leader election.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1beta1.StepAction
// resources that delegates the business logic to r, wiring in the given
// client, lister and event recorder. At most one controller.Options may be
// supplied to override the config store, finalizer name, or demote hook.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.StepActionLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
// Check the options function input. It should be 0 or 1.
if len(options) > 1 {
logger.Fatal("Up to one options struct is supported, found: ", len(options))
}
// Fail fast when users inadvertently implement the other LeaderAware interface.
// For the typed reconcilers, Promote shouldn't take any arguments.
if _, ok := r.(reconciler.LeaderAware); ok {
logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
}
rec := &reconcilerImpl{
LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
// On promotion, enqueue every StepAction so the new leader reconciles them.
PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
all, err := lister.List(labels.Everything())
if err != nil {
return err
}
for _, elt := range all {
// TODO: Consider letting users specify a filter in options.
enq(bkt, types.NamespacedName{
Namespace: elt.GetNamespace(),
Name: elt.GetName(),
})
}
return nil
},
},
Client: client,
Lister: lister,
Recorder: recorder,
reconciler: r,
finalizerName: defaultFinalizerName,
}
// Apply any optional overrides.
for _, opts := range options {
if opts.ConfigStore != nil {
rec.configStore = opts.ConfigStore
}
if opts.FinalizerName != "" {
rec.finalizerName = opts.FinalizerName
}
if opts.DemoteFunc != nil {
rec.DemoteFunc = opts.DemoteFunc
}
}
return rec
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
logger := logging.FromContext(ctx)
// Initialize the reconciler state. This will convert the namespace/name
// string into a distinct namespace and name, determine if this instance of
// the reconciler is the leader, and any additional interfaces implemented
// by the reconciler. Returns an error is the resource key is invalid.
s, err := newState(key, r)
if err != nil {
logger.Error("Invalid resource key: ", key)
// Returning nil (not err) drops the malformed key instead of requeuing it forever.
return nil
}
// If we are not the leader, and we don't implement either ReadOnly
// observer interfaces, then take a fast-path out.
if s.isNotLeaderNorObserver() {
return controller.NewSkipKey(key)
}
// If configStore is set, attach the frozen configuration to the context.
if r.configStore != nil {
ctx = r.configStore.ToContext(ctx)
}
// Add the recorder to context.
ctx = controller.WithEventRecorder(ctx, r.Recorder)
// Get the resource with this namespace/name.
getter := r.Lister.StepActions(s.namespace)
original, err := getter.Get(s.name)
if errors.IsNotFound(err) {
// The resource may no longer exist, in which case we stop processing and call
// the ObserveDeletion handler if appropriate.
logger.Debugf("Resource %q no longer exists", key)
if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
return del.ObserveDeletion(ctx, types.NamespacedName{
Namespace: s.namespace,
Name: s.name,
})
}
return nil
} else if err != nil {
return err
}
// Don't modify the informers copy.
resource := original.DeepCopy()
var reconcileEvent reconciler.Event
// Dispatch to ReconcileKind / FinalizeKind / ObserveKind depending on
// leadership and whether the resource is being deleted.
name, do := s.reconcileMethodFor(resource)
// Append the target method to the logger.
logger = logger.With(zap.String("targetMethod", name))
switch name {
case reconciler.DoReconcileKind:
// Set and update the finalizer on resource if r.reconciler
// implements Finalizer.
if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
return fmt.Errorf("failed to set finalizers: %w", err)
}
// Reconcile this copy of the resource and then write back any status
// updates regardless of whether the reconciliation errored out.
reconcileEvent = do(ctx, resource)
case reconciler.DoFinalizeKind:
// For finalizing reconcilers, if this resource being marked for deletion
// and reconciled cleanly (nil or normal event), remove the finalizer.
reconcileEvent = do(ctx, resource)
if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
return fmt.Errorf("failed to clear finalizers: %w", err)
}
case reconciler.DoObserveKind:
// Observe any changes to this resource, since we are not the leader.
reconcileEvent = do(ctx, resource)
}
// Report the reconciler event, if any.
if reconcileEvent != nil {
var event *reconciler.ReconcilerEvent
if reconciler.EventAs(reconcileEvent, &event) {
logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())
// the event was wrapped inside an error, consider the reconciliation as failed
if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
return reconcileEvent
}
return nil
}
if controller.IsSkipKey(reconcileEvent) {
// This is a wrapped error, don't emit an event.
} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
// This is a wrapped error, don't emit an event.
} else {
logger.Errorw("Returned an error", zap.Error(reconcileEvent))
r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
}
return reconcileEvent
}
return nil
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
//
// It applies a JSON merge patch that touches only metadata.finalizers (and
// metadata.resourceVersion, so the patch is rejected if the object changed
// since it was read). On success/failure a corresponding Kubernetes event is
// recorded. Returns the patched resource, or the original on a no-op.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.StepAction, desiredFinalizers sets.Set[string]) (*v1beta1.StepAction, error) {
// Don't modify the informers copy.
existing := resource.DeepCopy()
var finalizers []string
// If there's nothing to update, just return.
existingFinalizers := sets.New[string](existing.Finalizers...)
if desiredFinalizers.Has(r.finalizerName) {
if existingFinalizers.Has(r.finalizerName) {
// Nothing to do.
return resource, nil
}
// Add the finalizer.
finalizers = append(existing.Finalizers, r.finalizerName)
} else {
if !existingFinalizers.Has(r.finalizerName) {
// Nothing to do.
return resource, nil
}
// Remove the finalizer.
existingFinalizers.Delete(r.finalizerName)
finalizers = sets.List(existingFinalizers)
}
mergePatch := map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": finalizers,
"resourceVersion": existing.ResourceVersion,
},
}
patch, err := json.Marshal(mergePatch)
if err != nil {
return resource, err
}
patcher := r.Client.TektonV1beta1().StepActions(resource.Namespace)
resourceName := resource.Name
updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
"Failed to update finalizers for %q: %v", resourceName, err)
} else {
r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
"Updated %q finalizers", resource.GetName())
}
return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is tracked on the resource
// when the underlying reconciler implements Finalizer and the resource is not
// being deleted. For non-finalizing reconcilers it is a no-op.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.StepAction) (*v1beta1.StepAction, error) {
	if _, finalizes := r.reconciler.(Finalizer); !finalizes {
		// No finalization logic; leave the resource untouched.
		return resource, nil
	}
	desired := sets.New[string](resource.Finalizers...)
	if resource.GetDeletionTimestamp().IsZero() {
		// The resource is live, so our finalizer must be present.
		desired.Insert(r.finalizerName)
	}
	// Write back only the finalizer this reconciler owns.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer drops r.finalizerName from a resource that is being deleted,
// but only when finalization succeeded: a nil event, or a Normal-type
// reconciler.ReconcilerEvent. Any other outcome keeps the finalizer in place.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.StepAction, reconcileEvent reconciler.Event) (*v1beta1.StepAction, error) {
	if _, finalizes := r.reconciler.(Finalizer); !finalizes {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; nothing to clear.
		return resource, nil
	}
	desired := sets.New[string](resource.Finalizers...)
	succeeded := reconcileEvent == nil
	if !succeeded {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == v1.EventTypeNormal {
			succeeded = true
		}
	}
	if succeeded {
		desired.Delete(r.finalizerName)
	}
	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package stepaction
import (
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
type state struct {
// key is the original reconciliation key from the queue.
key string
// namespace is the namespace split from the reconciliation key.
namespace string
// name is the name split from the reconciliation key.
name string
// reconciler is the reconciler.
reconciler Interface
// roi is the read only interface cast of the reconciler.
roi ReadOnlyInterface
// isROI is true when the reconciler implements ReadOnlyInterface, i.e. it
// can observe reconciliation without being the leader.
isROI bool
// isLeader is true when this instance of the reconciler is the elected
// leader for the key being reconciled.
isLeader bool
}
// newState derives the per-run reconciliation state for key: it splits the
// queue key into namespace and name, probes the reconciler for the optional
// ReadOnlyInterface, and records whether this replica leads the key.
func newState(key string, r *reconcilerImpl) (*state, error) {
	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}
	readOnly, observes := r.reconciler.(ReadOnlyInterface)
	return &state{
		key:        key,
		namespace:  ns,
		name:       name,
		reconciler: r.reconciler,
		roi:        readOnly,
		isROI:      observes,
		isLeader:   r.IsLeaderFor(types.NamespacedName{Namespace: ns, Name: name}),
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler can do no work at
// all for the current key: it is neither the elected leader nor a read-only
// observer, so callers should take a fast-path out.
func (s *state) isNotLeaderNorObserver() bool {
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to o:
// ReconcileKind for live resources on the leader, ObserveKind for read-only
// replicas, FinalizeKind for deleted resources on a finalizing leader.
// It returns "unknown" with a nil func when no method applies.
func (s *state) reconcileMethodFor(o *v1beta1.StepAction) (string, doReconcile) {
	switch {
	case o.GetDeletionTimestamp().IsZero():
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		}
		if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	case s.isLeader:
		if fin, ok := s.reconciler.(Finalizer); ok {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
task "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/task"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
// defaultControllerAgentName is the event-source component name used when
// no AgentName override is supplied via controller options.
defaultControllerAgentName = "task-controller"
// defaultFinalizerName is the finalizer managed on Task resources unless
// overridden via controller options.
defaultFinalizerName = "tasks.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
logger := logging.FromContext(ctx)
// Check the options function input. It should be 0 or 1.
if len(optionsFns) > 1 {
logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
}
taskInformer := task.Get(ctx)
lister := taskInformer.Lister()
// promoteFilterFunc/promoteFunc are captured by the PromoteFunc closure below
// and may be replaced later by the options loop before promotion ever runs.
var promoteFilterFunc func(obj interface{}) bool
var promoteFunc = func(bkt reconciler.Bucket) {}
rec := &reconcilerImpl{
LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
// On promotion, enqueue every Task (optionally filtered) so the new
// leader reconciles all resources it now owns.
PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
// Signal promotion event
promoteFunc(bkt)
all, err := lister.List(labels.Everything())
if err != nil {
return err
}
for _, elt := range all {
if promoteFilterFunc != nil {
if ok := promoteFilterFunc(elt); !ok {
continue
}
}
enq(bkt, types.NamespacedName{
Namespace: elt.GetNamespace(),
Name: elt.GetName(),
})
}
return nil
},
},
Client: client.Get(ctx),
Lister: lister,
reconciler: r,
finalizerName: defaultFinalizerName,
}
// Derive the work-queue/logger name from the concrete reconciler type.
ctrType := reflect.TypeOf(r).Elem()
ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")
logger = logger.With(
zap.String(logkey.ControllerType, ctrTypeName),
zap.String(logkey.Kind, "tekton.dev.Task"),
)
impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
agentName := defaultControllerAgentName
// Pass impl to the options. Save any optional results.
for _, fn := range optionsFns {
opts := fn(impl)
if opts.ConfigStore != nil {
rec.configStore = opts.ConfigStore
}
if opts.FinalizerName != "" {
rec.finalizerName = opts.FinalizerName
}
if opts.AgentName != "" {
agentName = opts.AgentName
}
if opts.DemoteFunc != nil {
rec.DemoteFunc = opts.DemoteFunc
}
if opts.PromoteFilterFunc != nil {
promoteFilterFunc = opts.PromoteFilterFunc
}
if opts.PromoteFunc != nil {
promoteFunc = opts.PromoteFunc
}
}
rec.Recorder = createRecorder(ctx, agentName)
return impl
}
// createRecorder returns the event recorder already attached to ctx if one
// exists; otherwise it builds a broadcaster-backed recorder that both logs
// events and records them to the Kubernetes Events API, stopping its watches
// when ctx is cancelled.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
logger := logging.FromContext(ctx)
recorder := controller.GetEventRecorder(ctx)
if recorder == nil {
// Create event broadcaster
logger.Debug("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
watches := []watch.Interface{
eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
eventBroadcaster.StartRecordingToSink(
&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
}
recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
// Tie the lifetime of the event watches to ctx.
go func() {
<-ctx.Done()
for _, w := range watches {
w.Stop()
}
}()
}
return recorder
}
// init registers the Tekton API types into the shared client-go scheme so
// components using scheme.Scheme (e.g. the event recorder) can resolve them.
func init() {
	// AddToScheme returns an error on registration failure; the original code
	// silently discarded it. Panicking here makes a broken scheme registration
	// (a programmer/configuration bug) fail fast at process start.
	if err := versionedscheme.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
context "context"
json "encoding/json"
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
zap "go.uber.org/zap"
v1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.Task.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1beta1.Task. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1beta1.Task) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1beta1.Task.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1beta1.Task. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1beta1.Task) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.Task if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1beta1.Task.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1beta1.Task) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, FinalizeKind and
// ObserveKind, allowing a single dispatch site to invoke whichever applies.
type doReconcile func(ctx context.Context, o *v1beta1.Task) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1beta1.Task resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1beta1.TaskLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1beta1.Task resources
// that delegates business logic to r. At most one options struct may be
// supplied; it can override the config store, finalizer name and demote func.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.TaskLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// PromoteFunc re-enqueues every Task in the bucket when this
			// instance is promoted to leader for it.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply the (at most one) options struct on top of the defaults.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An unparsable key can never succeed on retry, so it is dropped
		// rather than requeued.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.Tasks(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	// Dispatch to ReconcileKind, FinalizeKind or ObserveKind depending on
	// leadership and the resource's deletion timestamp.
	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.Task, desiredFinalizers sets.Set[string]) (*v1beta1.Task, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer. Appending keeps any other finalizers already on
		// the resource in their original order.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Including resourceVersion in the merge patch makes the write fail with
	// a conflict if the object changed since it was read.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1beta1().Tasks(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on resource when
// the underlying reconciler implements Finalizer and the resource is not
// being deleted, then synchronizes the stored object via a filtered update.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.Task) (*v1beta1.Task, error) {
	_, hasFinalizerLogic := r.reconciler.(Finalizer)
	if !hasFinalizerLogic {
		// No finalization logic to protect: leave the resource untouched.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)

	// Only mark the finalizer while the resource is live; once a deletion
	// timestamp is set, removal is handled by clearFinalizer instead.
	if resource.GetDeletionTimestamp().IsZero() {
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes r.finalizerName from the resource after finalization
// completed cleanly. It is a no-op unless the underlying reconciler
// implements Finalizer and the resource carries a deletion timestamp.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.Task, reconcileEvent reconciler.Event) (*v1beta1.Task, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			// Only a Normal event counts as successful finalization; any
			// other outcome keeps the finalizer so finalization is retried.
			if event.EventType == v1.EventTypeNormal {
				finalizers.Delete(r.finalizerName)
			}
		}
	} else {
		// A nil event means finalization succeeded; drop the finalizer.
		finalizers.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package task
import (
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) is true when the reconciler only observes
	// reconciliation (implements ReadOnlyInterface).
	isROI bool
	// isLeader is true when this instance of the reconciler is the elected
	// leader for the key.
	isLeader bool
}
// newState parses the queue key and captures, for this single run, whether
// the reconciler is a read-only observer and whether this instance currently
// leads the key. Returns an error for keys not of the namespace/name form.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	roi, isROI := r.reconciler.(ReadOnlyInterface)

	isLeader := r.IsLeaderFor(types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	})

	return &state{
		key:        key,
		namespace:  namespace,
		name:       name,
		reconciler: r.reconciler,
		roi:        roi,
		isROI:      isROI,
		isLeader:   isLeader,
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler instance has no work
// possible for the key: it is neither the elected leader nor a read-only
// observer, so the caller may take a fast path out.
func (s *state) isNotLeaderNorObserver() bool {
	// Work is possible when we lead the key or can at least observe it.
	canWork := s.isLeader || s.isROI
	return !canWork
}
// reconcileMethodFor selects the typed method applicable to o in the current
// state: ReconcileKind (leader, live resource), ObserveKind (non-leader
// observer, live resource), or FinalizeKind (leader, resource being deleted,
// reconciler implements Finalizer). Returns "unknown" and nil when none apply.
func (s *state) reconcileMethodFor(o *v1beta1.Task) (string, doReconcile) {
	if o.GetDeletionTimestamp().IsZero() {
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		} else if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
		return reconciler.DoFinalizeKind, fin.FinalizeKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/injection/client"
taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the component name stamped on Events
	// emitted by this controller unless overridden via Options.AgentName.
	defaultControllerAgentName = "taskrun-controller"

	// defaultFinalizerName is the finalizer managed on TaskRun resources
	// unless overridden via Options.FinalizerName.
	defaultFinalizerName = "taskruns.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	taskrunInformer := taskrun.Get(ctx)

	lister := taskrunInformer.Lister()

	// These defaults (enqueue everything, signal nothing) may be replaced
	// below from the options returned by optionsFns.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// PromoteFunc re-enqueues the (optionally filtered) TaskRuns in
			// the bucket when this instance is promoted to leader for it.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue/logger name from the concrete reconciler type.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "tekton.dev.TaskRun"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the EventRecorder already stashed on ctx. When none
// is present it builds a broadcaster-backed recorder that records Events to
// the cluster and logs them, stopping the watches once ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	// Prefer a recorder supplied by the surrounding context, if any.
	if recorder := controller.GetEventRecorder(ctx); recorder != nil {
		return recorder
	}

	logger.Debug("Creating event broadcaster")
	broadcaster := record.NewBroadcaster()
	sinkWatches := []watch.Interface{
		broadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
		broadcaster.StartRecordingToSink(
			&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
	}

	// Tear the watches down when the context is cancelled so the
	// broadcaster goroutines do not leak.
	go func() {
		<-ctx.Done()
		for _, w := range sinkWatches {
			w.Stop()
		}
	}()

	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
}
// init registers the Tekton API types with the global client-go scheme so
// the event recorder can resolve their GroupVersionKinds.
func init() {
	// NOTE(review): AddToScheme's error is discarded (generated code); a
	// registration failure would surface later when recording events.
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
context "context"
json "encoding/json"
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
v1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.TaskRun.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1beta1.TaskRun. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1beta1.TaskRun.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1beta1.TaskRun. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.TaskRun if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1beta1.TaskRun.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, FinalizeKind and
// ObserveKind, allowing a single dispatch site to invoke whichever applies.
type doReconcile func(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1beta1.TaskRun resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister pipelinev1beta1.TaskRunLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1beta1.TaskRun
// resources that delegates business logic to r. At most one options struct
// may be supplied; it can override the config store, finalizer name, status
// update behavior and demote func.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.TaskRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// PromoteFunc re-enqueues every TaskRun in the bucket when this
			// instance is promoted to leader for it.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply the (at most one) options struct on top of the defaults.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		// An unparsable key can never succeed on retry, so it is dropped
		// rather than requeued.
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.TaskRuns(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	// Dispatch to ReconcileKind, FinalizeKind or ObserveKind depending on
	// leadership and the resource's deletion timestamp.
	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status back to the API server, retrying on
// conflicts. The first attempt reuses the (possibly stale) informer copy of
// existing; subsequent attempts re-fetch the latest object via the API.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1beta1.TaskRun, desired *v1beta1.TaskRun) error {
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			getter := r.Client.TektonV1beta1().TaskRuns(desired.Namespace)

			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute the status diff when debug logging is enabled.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status

		updater := r.Client.TektonV1beta1().TaskRuns(existing.Namespace)

		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.TaskRun, desiredFinalizers sets.Set[string]) (*v1beta1.TaskRun, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer. Appending keeps any other finalizers already on
		// the resource in their original order.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Including resourceVersion in the merge patch makes the write fail with
	// a conflict if the object changed since it was read.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.TektonV1beta1().TaskRuns(resource.Namespace)

	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer adds r.finalizerName to the resource when the
// underlying reconciler implements Finalizer and the resource is not being
// deleted, then synchronizes the stored object via a filtered update.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.TaskRun) (*v1beta1.TaskRun, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		// No finalization logic to protect: leave the resource untouched.
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	// If this resource is not being deleted, mark the finalizer.
	if resource.GetDeletionTimestamp().IsZero() {
		finalizers.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
// clearFinalizer removes r.finalizerName from a resource that is being
// deleted, provided the underlying reconciler implements Finalizer. When
// reconcileEvent is non-nil, the finalizer is only removed if the event
// unwraps to a *reconciler.ReconcilerEvent of type Normal (i.e. finalization
// succeeded); any other non-nil event leaves the finalizer in place so the
// resource is retried.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.TaskRun, reconcileEvent reconciler.Event) (*v1beta1.TaskRun, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; leave finalizers alone.
		return resource, nil
	}
	desired := sets.New[string](resource.Finalizers...)

	// Decide whether finalization is considered complete.
	shouldRemove := true
	if reconcileEvent != nil {
		shouldRemove = false
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == v1.EventTypeNormal {
			shouldRemove = true
		}
	}
	if shouldRemove {
		desired.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package taskrun
import (
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the reconciler.
	reconciler Interface
	// roi is the read only interface cast of the reconciler; nil when the
	// reconciler does not implement ReadOnlyInterface (see isROI).
	roi ReadOnlyInterface
	// isROI (Read Only Interface) reports whether the reconciler only
	// observes reconciliation rather than mutating the resource.
	isROI bool
	// isLeader reports whether this instance of the reconciler is the
	// elected leader for the resource's namespace/name.
	isLeader bool
}
// newState builds the per-run reconciliation state for the given queue key:
// it splits the key into namespace and name, captures whether the reconciler
// is read-only, and records whether this instance is the elected leader for
// the keyed resource. An error is returned when the key is not of the
// namespace/name form.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	roi, isROI := r.reconciler.(ReadOnlyInterface)

	return &state{
		key:        key,
		namespace:  ns,
		name:       name,
		reconciler: r.reconciler,
		roi:        roi,
		isROI:      isROI,
		isLeader: r.IsLeaderFor(types.NamespacedName{
			Namespace: ns,
			Name:      name,
		}),
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler, in its current
// state, has no possible work to do: it is neither the elected leader nor a
// read-only observer. A true result lets callers take a fast-path exit.
func (s *state) isNotLeaderNorObserver() bool {
	// Only leaders may reconcile and only ReadOnlyInterface implementers may
	// observe; lacking both means there is nothing to do.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to o given
// the current state: ReconcileKind for leaders on live resources, ObserveKind
// for read-only observers on live resources, and FinalizeKind for leaders on
// resources being deleted (when the reconciler implements Finalizer). It
// returns the method's label alongside the function, or ("unknown", nil) when
// no method applies.
func (s *state) reconcileMethodFor(o *v1beta1.TaskRun) (string, doReconcile) {
	deleting := !o.GetDeletionTimestamp().IsZero()
	switch {
	case !deleting && s.isLeader:
		return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
	case !deleting && s.isROI:
		return reconciler.DoObserveKind, s.roi.ObserveKind
	case deleting && s.isLeader:
		if fin, ok := s.reconciler.(Finalizer); ok {
			return reconciler.DoFinalizeKind, fin.FinalizeKind
		}
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// PipelineLister helps list Pipelines.
// All objects returned here must be treated as read-only.
type PipelineLister interface {
	// List lists all Pipelines in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.Pipeline, err error)
	// Pipelines returns an object that can list and get Pipelines.
	Pipelines(namespace string) PipelineNamespaceLister
	PipelineListerExpansion
}

// pipelineLister implements the PipelineLister interface by delegating to a
// generic ResourceIndexer over *pipelinev1.Pipeline.
type pipelineLister struct {
	listers.ResourceIndexer[*pipelinev1.Pipeline]
}

// NewPipelineLister returns a new PipelineLister backed by the given indexer.
func NewPipelineLister(indexer cache.Indexer) PipelineLister {
	ri := listers.New[*pipelinev1.Pipeline](indexer, pipelinev1.Resource("pipeline"))
	return &pipelineLister{ri}
}

// Pipelines returns an object that can list and get Pipelines in the given
// namespace.
func (s *pipelineLister) Pipelines(namespace string) PipelineNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1.Pipeline](s.ResourceIndexer, namespace)
	return pipelineNamespaceLister{scoped}
}
// PipelineNamespaceLister helps list and get Pipelines.
// All objects returned here must be treated as read-only.
type PipelineNamespaceLister interface {
	// List lists all Pipelines in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.Pipeline, err error)
	// Get retrieves the Pipeline from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1.Pipeline, error)
	PipelineNamespaceListerExpansion
}

// pipelineNamespaceLister implements the PipelineNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type pipelineNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1.Pipeline]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// PipelineRunLister helps list PipelineRuns.
// All objects returned here must be treated as read-only.
type PipelineRunLister interface {
	// List lists all PipelineRuns in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.PipelineRun, err error)
	// PipelineRuns returns an object that can list and get PipelineRuns.
	PipelineRuns(namespace string) PipelineRunNamespaceLister
	PipelineRunListerExpansion
}

// pipelineRunLister implements the PipelineRunLister interface by delegating
// to a generic ResourceIndexer over *pipelinev1.PipelineRun.
type pipelineRunLister struct {
	listers.ResourceIndexer[*pipelinev1.PipelineRun]
}

// NewPipelineRunLister returns a new PipelineRunLister backed by the given
// indexer.
func NewPipelineRunLister(indexer cache.Indexer) PipelineRunLister {
	ri := listers.New[*pipelinev1.PipelineRun](indexer, pipelinev1.Resource("pipelinerun"))
	return &pipelineRunLister{ri}
}

// PipelineRuns returns an object that can list and get PipelineRuns in the
// given namespace.
func (s *pipelineRunLister) PipelineRuns(namespace string) PipelineRunNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1.PipelineRun](s.ResourceIndexer, namespace)
	return pipelineRunNamespaceLister{scoped}
}
// PipelineRunNamespaceLister helps list and get PipelineRuns.
// All objects returned here must be treated as read-only.
type PipelineRunNamespaceLister interface {
	// List lists all PipelineRuns in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.PipelineRun, err error)
	// Get retrieves the PipelineRun from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1.PipelineRun, error)
	PipelineRunNamespaceListerExpansion
}

// pipelineRunNamespaceLister implements the PipelineRunNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type pipelineRunNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1.PipelineRun]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// TaskLister helps list Tasks.
// All objects returned here must be treated as read-only.
type TaskLister interface {
	// List lists all Tasks in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.Task, err error)
	// Tasks returns an object that can list and get Tasks.
	Tasks(namespace string) TaskNamespaceLister
	TaskListerExpansion
}

// taskLister implements the TaskLister interface by delegating to a generic
// ResourceIndexer over *pipelinev1.Task.
type taskLister struct {
	listers.ResourceIndexer[*pipelinev1.Task]
}

// NewTaskLister returns a new TaskLister backed by the given indexer.
func NewTaskLister(indexer cache.Indexer) TaskLister {
	ri := listers.New[*pipelinev1.Task](indexer, pipelinev1.Resource("task"))
	return &taskLister{ri}
}

// Tasks returns an object that can list and get Tasks in the given namespace.
func (s *taskLister) Tasks(namespace string) TaskNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1.Task](s.ResourceIndexer, namespace)
	return taskNamespaceLister{scoped}
}
// TaskNamespaceLister helps list and get Tasks.
// All objects returned here must be treated as read-only.
type TaskNamespaceLister interface {
	// List lists all Tasks in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.Task, err error)
	// Get retrieves the Task from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1.Task, error)
	TaskNamespaceListerExpansion
}

// taskNamespaceLister implements the TaskNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type taskNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1.Task]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// TaskRunLister helps list TaskRuns.
// All objects returned here must be treated as read-only.
type TaskRunLister interface {
	// List lists all TaskRuns in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.TaskRun, err error)
	// TaskRuns returns an object that can list and get TaskRuns.
	TaskRuns(namespace string) TaskRunNamespaceLister
	TaskRunListerExpansion
}

// taskRunLister implements the TaskRunLister interface by delegating to a
// generic ResourceIndexer over *pipelinev1.TaskRun.
type taskRunLister struct {
	listers.ResourceIndexer[*pipelinev1.TaskRun]
}

// NewTaskRunLister returns a new TaskRunLister backed by the given indexer.
func NewTaskRunLister(indexer cache.Indexer) TaskRunLister {
	ri := listers.New[*pipelinev1.TaskRun](indexer, pipelinev1.Resource("taskrun"))
	return &taskRunLister{ri}
}

// TaskRuns returns an object that can list and get TaskRuns in the given
// namespace.
func (s *taskRunLister) TaskRuns(namespace string) TaskRunNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1.TaskRun](s.ResourceIndexer, namespace)
	return taskRunNamespaceLister{scoped}
}
// TaskRunNamespaceLister helps list and get TaskRuns.
// All objects returned here must be treated as read-only.
type TaskRunNamespaceLister interface {
	// List lists all TaskRuns in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1.TaskRun, err error)
	// Get retrieves the TaskRun from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1.TaskRun, error)
	TaskRunNamespaceListerExpansion
}

// taskRunNamespaceLister implements the TaskRunNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type taskRunNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1.TaskRun]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// RunLister helps list Runs.
// All objects returned here must be treated as read-only.
type RunLister interface {
	// List lists all Runs in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1alpha1.Run, err error)
	// Runs returns an object that can list and get Runs.
	Runs(namespace string) RunNamespaceLister
	RunListerExpansion
}

// runLister implements the RunLister interface by delegating to a generic
// ResourceIndexer over *pipelinev1alpha1.Run.
type runLister struct {
	listers.ResourceIndexer[*pipelinev1alpha1.Run]
}

// NewRunLister returns a new RunLister backed by the given indexer.
func NewRunLister(indexer cache.Indexer) RunLister {
	ri := listers.New[*pipelinev1alpha1.Run](indexer, pipelinev1alpha1.Resource("run"))
	return &runLister{ri}
}

// Runs returns an object that can list and get Runs in the given namespace.
func (s *runLister) Runs(namespace string) RunNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1alpha1.Run](s.ResourceIndexer, namespace)
	return runNamespaceLister{scoped}
}
// RunNamespaceLister helps list and get Runs.
// All objects returned here must be treated as read-only.
type RunNamespaceLister interface {
	// List lists all Runs in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1alpha1.Run, err error)
	// Get retrieves the Run from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1alpha1.Run, error)
	RunNamespaceListerExpansion
}

// runNamespaceLister implements the RunNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type runNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1alpha1.Run]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// StepActionLister helps list StepActions.
// All objects returned here must be treated as read-only.
type StepActionLister interface {
	// List lists all StepActions in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1alpha1.StepAction, err error)
	// StepActions returns an object that can list and get StepActions.
	StepActions(namespace string) StepActionNamespaceLister
	StepActionListerExpansion
}

// stepActionLister implements the StepActionLister interface by delegating to
// a generic ResourceIndexer over *pipelinev1alpha1.StepAction.
type stepActionLister struct {
	listers.ResourceIndexer[*pipelinev1alpha1.StepAction]
}

// NewStepActionLister returns a new StepActionLister backed by the given
// indexer.
func NewStepActionLister(indexer cache.Indexer) StepActionLister {
	ri := listers.New[*pipelinev1alpha1.StepAction](indexer, pipelinev1alpha1.Resource("stepaction"))
	return &stepActionLister{ri}
}

// StepActions returns an object that can list and get StepActions in the
// given namespace.
func (s *stepActionLister) StepActions(namespace string) StepActionNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1alpha1.StepAction](s.ResourceIndexer, namespace)
	return stepActionNamespaceLister{scoped}
}
// StepActionNamespaceLister helps list and get StepActions.
// All objects returned here must be treated as read-only.
type StepActionNamespaceLister interface {
	// List lists all StepActions in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1alpha1.StepAction, err error)
	// Get retrieves the StepAction from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1alpha1.StepAction, error)
	StepActionNamespaceListerExpansion
}

// stepActionNamespaceLister implements the StepActionNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type stepActionNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1alpha1.StepAction]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// VerificationPolicyLister helps list VerificationPolicies.
// All objects returned here must be treated as read-only.
type VerificationPolicyLister interface {
	// List lists all VerificationPolicies in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1alpha1.VerificationPolicy, err error)
	// VerificationPolicies returns an object that can list and get VerificationPolicies.
	VerificationPolicies(namespace string) VerificationPolicyNamespaceLister
	VerificationPolicyListerExpansion
}

// verificationPolicyLister implements the VerificationPolicyLister interface
// by delegating to a generic ResourceIndexer over
// *pipelinev1alpha1.VerificationPolicy.
type verificationPolicyLister struct {
	listers.ResourceIndexer[*pipelinev1alpha1.VerificationPolicy]
}

// NewVerificationPolicyLister returns a new VerificationPolicyLister backed
// by the given indexer.
func NewVerificationPolicyLister(indexer cache.Indexer) VerificationPolicyLister {
	ri := listers.New[*pipelinev1alpha1.VerificationPolicy](indexer, pipelinev1alpha1.Resource("verificationpolicy"))
	return &verificationPolicyLister{ri}
}

// VerificationPolicies returns an object that can list and get
// VerificationPolicies in the given namespace.
func (s *verificationPolicyLister) VerificationPolicies(namespace string) VerificationPolicyNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1alpha1.VerificationPolicy](s.ResourceIndexer, namespace)
	return verificationPolicyNamespaceLister{scoped}
}
// VerificationPolicyNamespaceLister helps list and get VerificationPolicies.
// All objects returned here must be treated as read-only.
type VerificationPolicyNamespaceLister interface {
	// List lists all VerificationPolicies in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1alpha1.VerificationPolicy, err error)
	// Get retrieves the VerificationPolicy from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1alpha1.VerificationPolicy, error)
	VerificationPolicyNamespaceListerExpansion
}

// verificationPolicyNamespaceLister implements the VerificationPolicyNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type verificationPolicyNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1alpha1.VerificationPolicy]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// CustomRunLister helps list CustomRuns.
// All objects returned here must be treated as read-only.
type CustomRunLister interface {
	// List lists all CustomRuns in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.CustomRun, err error)
	// CustomRuns returns an object that can list and get CustomRuns.
	CustomRuns(namespace string) CustomRunNamespaceLister
	CustomRunListerExpansion
}

// customRunLister implements the CustomRunLister interface by delegating to a
// generic ResourceIndexer over *pipelinev1beta1.CustomRun.
type customRunLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.CustomRun]
}

// NewCustomRunLister returns a new CustomRunLister backed by the given
// indexer.
func NewCustomRunLister(indexer cache.Indexer) CustomRunLister {
	ri := listers.New[*pipelinev1beta1.CustomRun](indexer, pipelinev1beta1.Resource("customrun"))
	return &customRunLister{ri}
}

// CustomRuns returns an object that can list and get CustomRuns in the given
// namespace.
func (s *customRunLister) CustomRuns(namespace string) CustomRunNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1beta1.CustomRun](s.ResourceIndexer, namespace)
	return customRunNamespaceLister{scoped}
}
// CustomRunNamespaceLister helps list and get CustomRuns.
// All objects returned here must be treated as read-only.
type CustomRunNamespaceLister interface {
	// List lists all CustomRuns in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.CustomRun, err error)
	// Get retrieves the CustomRun from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1beta1.CustomRun, error)
	CustomRunNamespaceListerExpansion
}

// customRunNamespaceLister implements the CustomRunNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type customRunNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.CustomRun]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// PipelineLister helps list Pipelines.
// All objects returned here must be treated as read-only.
type PipelineLister interface {
	// List lists all Pipelines in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.Pipeline, err error)
	// Pipelines returns an object that can list and get Pipelines.
	Pipelines(namespace string) PipelineNamespaceLister
	PipelineListerExpansion
}

// pipelineLister implements the PipelineLister interface by delegating to a
// generic ResourceIndexer over *pipelinev1beta1.Pipeline.
type pipelineLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.Pipeline]
}

// NewPipelineLister returns a new PipelineLister backed by the given indexer.
func NewPipelineLister(indexer cache.Indexer) PipelineLister {
	ri := listers.New[*pipelinev1beta1.Pipeline](indexer, pipelinev1beta1.Resource("pipeline"))
	return &pipelineLister{ri}
}

// Pipelines returns an object that can list and get Pipelines in the given
// namespace.
func (s *pipelineLister) Pipelines(namespace string) PipelineNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1beta1.Pipeline](s.ResourceIndexer, namespace)
	return pipelineNamespaceLister{scoped}
}
// PipelineNamespaceLister helps list and get Pipelines.
// All objects returned here must be treated as read-only.
type PipelineNamespaceLister interface {
	// List lists all Pipelines in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.Pipeline, err error)
	// Get retrieves the Pipeline from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1beta1.Pipeline, error)
	PipelineNamespaceListerExpansion
}

// pipelineNamespaceLister implements the PipelineNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type pipelineNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.Pipeline]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// PipelineRunLister helps list PipelineRuns.
// All objects returned here must be treated as read-only.
type PipelineRunLister interface {
	// List lists all PipelineRuns in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.PipelineRun, err error)
	// PipelineRuns returns an object that can list and get PipelineRuns.
	PipelineRuns(namespace string) PipelineRunNamespaceLister
	PipelineRunListerExpansion
}

// pipelineRunLister implements the PipelineRunLister interface by delegating
// to a generic ResourceIndexer over *pipelinev1beta1.PipelineRun.
type pipelineRunLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.PipelineRun]
}

// NewPipelineRunLister returns a new PipelineRunLister backed by the given
// indexer.
func NewPipelineRunLister(indexer cache.Indexer) PipelineRunLister {
	ri := listers.New[*pipelinev1beta1.PipelineRun](indexer, pipelinev1beta1.Resource("pipelinerun"))
	return &pipelineRunLister{ri}
}

// PipelineRuns returns an object that can list and get PipelineRuns in the
// given namespace.
func (s *pipelineRunLister) PipelineRuns(namespace string) PipelineRunNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1beta1.PipelineRun](s.ResourceIndexer, namespace)
	return pipelineRunNamespaceLister{scoped}
}
// PipelineRunNamespaceLister helps list and get PipelineRuns.
// All objects returned here must be treated as read-only.
type PipelineRunNamespaceLister interface {
	// List lists all PipelineRuns in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.PipelineRun, err error)
	// Get retrieves the PipelineRun from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1beta1.PipelineRun, error)
	PipelineRunNamespaceListerExpansion
}

// pipelineRunNamespaceLister implements the PipelineRunNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type pipelineRunNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.PipelineRun]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// StepActionLister helps list StepActions.
// All objects returned here must be treated as read-only.
type StepActionLister interface {
	// List lists all StepActions in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.StepAction, err error)
	// StepActions returns an object that can list and get StepActions.
	StepActions(namespace string) StepActionNamespaceLister
	StepActionListerExpansion
}

// stepActionLister implements the StepActionLister interface by delegating to
// a generic ResourceIndexer over *pipelinev1beta1.StepAction.
type stepActionLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.StepAction]
}

// NewStepActionLister returns a new StepActionLister backed by the given
// indexer.
func NewStepActionLister(indexer cache.Indexer) StepActionLister {
	ri := listers.New[*pipelinev1beta1.StepAction](indexer, pipelinev1beta1.Resource("stepaction"))
	return &stepActionLister{ri}
}

// StepActions returns an object that can list and get StepActions in the
// given namespace.
func (s *stepActionLister) StepActions(namespace string) StepActionNamespaceLister {
	scoped := listers.NewNamespaced[*pipelinev1beta1.StepAction](s.ResourceIndexer, namespace)
	return stepActionNamespaceLister{scoped}
}
// StepActionNamespaceLister helps list and get StepActions.
// All objects returned here must be treated as read-only.
type StepActionNamespaceLister interface {
	// List lists all StepActions in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*pipelinev1beta1.StepAction, err error)
	// Get retrieves the StepAction from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*pipelinev1beta1.StepAction, error)
	StepActionNamespaceListerExpansion
}

// stepActionNamespaceLister implements the StepActionNamespaceLister
// interface by embedding a namespace-scoped ResourceIndexer
// (see listers.NewNamespaced).
type stepActionNamespaceLister struct {
	listers.ResourceIndexer[*pipelinev1beta1.StepAction]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// TaskLister helps list Tasks.
// All objects returned here must be treated as read-only.
type TaskLister interface {
// List lists all Tasks in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*pipelinev1beta1.Task, err error)
// Tasks returns an object that can list and get Tasks.
Tasks(namespace string) TaskNamespaceLister
// TaskListerExpansion allows custom methods to be added to TaskLister;
// it is declared elsewhere in this package.
TaskListerExpansion
}
// taskLister implements the TaskLister interface.
type taskLister struct {
// List is provided by the embedded generic ResourceIndexer.
listers.ResourceIndexer[*pipelinev1beta1.Task]
}
// NewTaskLister returns a new TaskLister.
func NewTaskLister(indexer cache.Indexer) TaskLister {
return &taskLister{listers.New[*pipelinev1beta1.Task](indexer, pipelinev1beta1.Resource("task"))}
}
// Tasks returns an object that can list and get Tasks.
func (s *taskLister) Tasks(namespace string) TaskNamespaceLister {
return taskNamespaceLister{listers.NewNamespaced[*pipelinev1beta1.Task](s.ResourceIndexer, namespace)}
}
// TaskNamespaceLister helps list and get Tasks.
// All objects returned here must be treated as read-only.
type TaskNamespaceLister interface {
// List lists all Tasks in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*pipelinev1beta1.Task, err error)
// Get retrieves the Task from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*pipelinev1beta1.Task, error)
// TaskNamespaceListerExpansion allows custom methods to be added;
// it is declared elsewhere in this package.
TaskNamespaceListerExpansion
}
// taskNamespaceLister implements the TaskNamespaceLister
// interface.
type taskNamespaceLister struct {
// List and Get are provided by the embedded namespaced ResourceIndexer.
listers.ResourceIndexer[*pipelinev1beta1.Task]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// TaskRunLister helps list TaskRuns.
// All objects returned here must be treated as read-only.
type TaskRunLister interface {
// List lists all TaskRuns in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*pipelinev1beta1.TaskRun, err error)
// TaskRuns returns an object that can list and get TaskRuns.
TaskRuns(namespace string) TaskRunNamespaceLister
// TaskRunListerExpansion allows custom methods to be added to
// TaskRunLister; it is declared elsewhere in this package.
TaskRunListerExpansion
}
// taskRunLister implements the TaskRunLister interface.
type taskRunLister struct {
// List is provided by the embedded generic ResourceIndexer.
listers.ResourceIndexer[*pipelinev1beta1.TaskRun]
}
// NewTaskRunLister returns a new TaskRunLister.
func NewTaskRunLister(indexer cache.Indexer) TaskRunLister {
return &taskRunLister{listers.New[*pipelinev1beta1.TaskRun](indexer, pipelinev1beta1.Resource("taskrun"))}
}
// TaskRuns returns an object that can list and get TaskRuns.
func (s *taskRunLister) TaskRuns(namespace string) TaskRunNamespaceLister {
return taskRunNamespaceLister{listers.NewNamespaced[*pipelinev1beta1.TaskRun](s.ResourceIndexer, namespace)}
}
// TaskRunNamespaceLister helps list and get TaskRuns.
// All objects returned here must be treated as read-only.
type TaskRunNamespaceLister interface {
// List lists all TaskRuns in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*pipelinev1beta1.TaskRun, err error)
// Get retrieves the TaskRun from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*pipelinev1beta1.TaskRun, error)
// TaskRunNamespaceListerExpansion allows custom methods to be added;
// it is declared elsewhere in this package.
TaskRunNamespaceListerExpansion
}
// taskRunNamespaceLister implements the TaskRunNamespaceLister
// interface.
type taskRunNamespaceLister struct {
// List and Get are provided by the embedded namespaced ResourceIndexer.
listers.ResourceIndexer[*pipelinev1beta1.TaskRun]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
fmt "fmt"
http "net/http"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
// Interface is the clientset surface: discovery plus one typed client per
// supported API group/version of this clientset.
type Interface interface {
Discovery() discovery.DiscoveryInterface
ResolutionV1alpha1() resolutionv1alpha1.ResolutionV1alpha1Interface
ResolutionV1beta1() resolutionv1beta1.ResolutionV1beta1Interface
}
// Clientset contains the clients for groups.
type Clientset struct {
*discovery.DiscoveryClient
resolutionV1alpha1 *resolutionv1alpha1.ResolutionV1alpha1Client
resolutionV1beta1 *resolutionv1beta1.ResolutionV1beta1Client
}
// ResolutionV1alpha1 retrieves the ResolutionV1alpha1Client
func (c *Clientset) ResolutionV1alpha1() resolutionv1alpha1.ResolutionV1alpha1Interface {
return c.resolutionV1alpha1
}
// ResolutionV1beta1 retrieves the ResolutionV1beta1Client
func (c *Clientset) ResolutionV1beta1() resolutionv1beta1.ResolutionV1beta1Interface {
return c.resolutionV1beta1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
// Nil-receiver safe: a nil *Clientset yields a nil DiscoveryInterface.
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
// Copy so the caller's config is never mutated.
configShallowCopy := *c
if configShallowCopy.UserAgent == "" {
configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
}
// share the transport between all clients
httpClient, err := rest.HTTPClientFor(&configShallowCopy)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&configShallowCopy, httpClient)
}
// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
configShallowCopy := *c
// A QPS limit without an explicit RateLimiter implies a token-bucket
// limiter, which requires a positive Burst to construct.
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
// Construct one typed client per group/version, all sharing httpClient.
cs.resolutionV1alpha1, err = resolutionv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
cs.resolutionV1beta1, err = resolutionv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
cs, err := NewForConfig(c)
if err != nil {
panic(err)
}
return cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.resolutionV1alpha1 = resolutionv1alpha1.New(c)
cs.resolutionV1beta1 = resolutionv1beta1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1"
fakeresolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1"
fakeresolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
//
// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
// via --with-applyconfig).
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
// Seed the tracker with the caller-provided objects.
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{tracker: o}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
// Default reactor: serve every verb on every resource from the tracker.
cs.AddReactor("*", "*", testing.ObjectReaction(o))
// Default watch reactor: delegate watches to the tracker as well.
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return cs
}
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
tracker testing.ObjectTracker
}
// Discovery returns the fake discovery client backed by this clientset's Fake.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
// Tracker returns the object tracker backing this fake clientset.
func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}
// Compile-time assertions that Clientset satisfies the real clientset
// interface and the testing fake-client interface.
var (
_ clientset.Interface = &Clientset{}
_ testing.FakeClient = &Clientset{}
)
// ResolutionV1alpha1 retrieves the ResolutionV1alpha1Client
func (c *Clientset) ResolutionV1alpha1() resolutionv1alpha1.ResolutionV1alpha1Interface {
return &fakeresolutionv1alpha1.FakeResolutionV1alpha1{Fake: &c.Fake}
}
// ResolutionV1beta1 retrieves the ResolutionV1beta1Client
func (c *Clientset) ResolutionV1beta1() resolutionv1beta1.ResolutionV1beta1Interface {
return &fakeresolutionv1beta1.FakeResolutionV1beta1{Fake: &c.Fake}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// scheme registers all API types served by this fake clientset; codecs
// provides serializers backed by it.
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
// localSchemeBuilder collects the AddToScheme function of every
// group/version in this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
resolutionv1alpha1.AddToScheme,
resolutionv1beta1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
// Register the meta v1 types first, then every group/version collected above.
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(scheme))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// Scheme is the runtime.Scheme to which all types of this clientset are registered.
var Scheme = runtime.NewScheme()
// Codecs provides serializers backed by Scheme.
var Codecs = serializer.NewCodecFactory(Scheme)
// ParameterCodec converts request parameters for types registered in Scheme.
var ParameterCodec = runtime.NewParameterCodec(Scheme)
// localSchemeBuilder collects the AddToScheme function of every
// group/version in this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
resolutionv1alpha1.AddToScheme,
resolutionv1beta1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
// Register the meta v1 types first, then every group/version collected above.
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(Scheme))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeResolutionV1alpha1 is a test double for ResolutionV1alpha1Interface,
// dispatching every call through the shared testing.Fake reaction chain.
type FakeResolutionV1alpha1 struct {
*testing.Fake
}
// ResolutionRequests returns a fake ResolutionRequestInterface scoped to namespace.
func (c *FakeResolutionV1alpha1) ResolutionRequests(namespace string) v1alpha1.ResolutionRequestInterface {
return newFakeResolutionRequests(c, namespace)
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeResolutionV1alpha1) RESTClient() rest.Interface {
// Deliberately a typed-nil *rest.RESTClient: the fake serves calls via
// reactors and has no real REST client.
var ret *rest.RESTClient
return ret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1"
gentype "k8s.io/client-go/gentype"
)
// fakeResolutionRequests implements ResolutionRequestInterface
type fakeResolutionRequests struct {
// CRUD/list/watch behavior is provided by the embedded generic fake client.
*gentype.FakeClientWithList[*v1alpha1.ResolutionRequest, *v1alpha1.ResolutionRequestList]
Fake *FakeResolutionV1alpha1
}
// newFakeResolutionRequests wires the generic fake client with the
// group/version/resource metadata plus the object/list constructors,
// list-meta copier, and item accessors gentype needs for ResolutionRequests.
func newFakeResolutionRequests(fake *FakeResolutionV1alpha1, namespace string) resolutionv1alpha1.ResolutionRequestInterface {
return &fakeResolutionRequests{
gentype.NewFakeClientWithList[*v1alpha1.ResolutionRequest, *v1alpha1.ResolutionRequestList](
fake.Fake,
namespace,
v1alpha1.SchemeGroupVersion.WithResource("resolutionrequests"),
v1alpha1.SchemeGroupVersion.WithKind("ResolutionRequest"),
func() *v1alpha1.ResolutionRequest { return &v1alpha1.ResolutionRequest{} },
func() *v1alpha1.ResolutionRequestList { return &v1alpha1.ResolutionRequestList{} },
func(dst, src *v1alpha1.ResolutionRequestList) { dst.ListMeta = src.ListMeta },
func(list *v1alpha1.ResolutionRequestList) []*v1alpha1.ResolutionRequest {
return gentype.ToPointerSlice(list.Items)
},
func(list *v1alpha1.ResolutionRequestList, items []*v1alpha1.ResolutionRequest) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
http "net/http"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// ResolutionV1alpha1Interface exposes the typed clients for the
// resolution.tekton.dev/v1alpha1 API group/version.
type ResolutionV1alpha1Interface interface {
RESTClient() rest.Interface
ResolutionRequestsGetter
}
// ResolutionV1alpha1Client is used to interact with features provided by the resolution.tekton.dev group.
type ResolutionV1alpha1Client struct {
restClient rest.Interface
}
// ResolutionRequests returns a ResolutionRequestInterface scoped to namespace.
func (c *ResolutionV1alpha1Client) ResolutionRequests(namespace string) ResolutionRequestInterface {
return newResolutionRequests(c, namespace)
}
// NewForConfig creates a new ResolutionV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*ResolutionV1alpha1Client, error) {
// Copy so the caller's config is never mutated.
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new ResolutionV1alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResolutionV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
return &ResolutionV1alpha1Client{client}, nil
}
// NewForConfigOrDie creates a new ResolutionV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *ResolutionV1alpha1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new ResolutionV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *ResolutionV1alpha1Client {
return &ResolutionV1alpha1Client{c}
}
// setConfigDefaults fills in the group/version, API path, negotiated
// serializer, and (when unset) user agent this typed client requires.
func setConfigDefaults(config *rest.Config) error {
gv := resolutionv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *ResolutionV1alpha1Client) RESTClient() rest.Interface {
// Nil-receiver safe: a nil client yields a nil rest.Interface.
if c == nil {
return nil
}
return c.restClient
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// ResolutionRequestsGetter has a method to return a ResolutionRequestInterface.
// A group's client should implement this interface.
type ResolutionRequestsGetter interface {
ResolutionRequests(namespace string) ResolutionRequestInterface
}
// ResolutionRequestInterface has methods to work with ResolutionRequest resources.
type ResolutionRequestInterface interface {
Create(ctx context.Context, resolutionRequest *resolutionv1alpha1.ResolutionRequest, opts v1.CreateOptions) (*resolutionv1alpha1.ResolutionRequest, error)
Update(ctx context.Context, resolutionRequest *resolutionv1alpha1.ResolutionRequest, opts v1.UpdateOptions) (*resolutionv1alpha1.ResolutionRequest, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
UpdateStatus(ctx context.Context, resolutionRequest *resolutionv1alpha1.ResolutionRequest, opts v1.UpdateOptions) (*resolutionv1alpha1.ResolutionRequest, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*resolutionv1alpha1.ResolutionRequest, error)
List(ctx context.Context, opts v1.ListOptions) (*resolutionv1alpha1.ResolutionRequestList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resolutionv1alpha1.ResolutionRequest, err error)
// ResolutionRequestExpansion allows custom methods to be added; it is
// declared elsewhere in this package.
ResolutionRequestExpansion
}
// resolutionRequests implements ResolutionRequestInterface
type resolutionRequests struct {
// All CRUD/list/watch/patch methods come from the embedded generic client.
*gentype.ClientWithList[*resolutionv1alpha1.ResolutionRequest, *resolutionv1alpha1.ResolutionRequestList]
}
// newResolutionRequests returns a ResolutionRequests
func newResolutionRequests(c *ResolutionV1alpha1Client, namespace string) *resolutionRequests {
return &resolutionRequests{
gentype.NewClientWithList[*resolutionv1alpha1.ResolutionRequest, *resolutionv1alpha1.ResolutionRequestList](
"resolutionrequests",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
func() *resolutionv1alpha1.ResolutionRequest { return &resolutionv1alpha1.ResolutionRequest{} },
func() *resolutionv1alpha1.ResolutionRequestList { return &resolutionv1alpha1.ResolutionRequestList{} },
),
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeResolutionV1beta1 is a test double for ResolutionV1beta1Interface,
// dispatching every call through the shared testing.Fake reaction chain.
type FakeResolutionV1beta1 struct {
*testing.Fake
}
// ResolutionRequests returns a fake ResolutionRequestInterface scoped to namespace.
func (c *FakeResolutionV1beta1) ResolutionRequests(namespace string) v1beta1.ResolutionRequestInterface {
return newFakeResolutionRequests(c, namespace)
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeResolutionV1beta1) RESTClient() rest.Interface {
// Deliberately a typed-nil *rest.RESTClient: the fake serves calls via
// reactors and has no real REST client.
var ret *rest.RESTClient
return ret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1"
gentype "k8s.io/client-go/gentype"
)
// fakeResolutionRequests implements ResolutionRequestInterface
type fakeResolutionRequests struct {
// CRUD/list/watch behavior is provided by the embedded generic fake client.
*gentype.FakeClientWithList[*v1beta1.ResolutionRequest, *v1beta1.ResolutionRequestList]
Fake *FakeResolutionV1beta1
}
// newFakeResolutionRequests wires the generic fake client with the
// group/version/resource metadata plus the object/list constructors,
// list-meta copier, and item accessors gentype needs for ResolutionRequests.
func newFakeResolutionRequests(fake *FakeResolutionV1beta1, namespace string) resolutionv1beta1.ResolutionRequestInterface {
return &fakeResolutionRequests{
gentype.NewFakeClientWithList[*v1beta1.ResolutionRequest, *v1beta1.ResolutionRequestList](
fake.Fake,
namespace,
v1beta1.SchemeGroupVersion.WithResource("resolutionrequests"),
v1beta1.SchemeGroupVersion.WithKind("ResolutionRequest"),
func() *v1beta1.ResolutionRequest { return &v1beta1.ResolutionRequest{} },
func() *v1beta1.ResolutionRequestList { return &v1beta1.ResolutionRequestList{} },
func(dst, src *v1beta1.ResolutionRequestList) { dst.ListMeta = src.ListMeta },
func(list *v1beta1.ResolutionRequestList) []*v1beta1.ResolutionRequest {
return gentype.ToPointerSlice(list.Items)
},
func(list *v1beta1.ResolutionRequestList, items []*v1beta1.ResolutionRequest) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
http "net/http"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
type ResolutionV1beta1Interface interface {
RESTClient() rest.Interface
ResolutionRequestsGetter
}
// ResolutionV1beta1Client is used to interact with features provided by the resolution.tekton.dev group.
type ResolutionV1beta1Client struct {
restClient rest.Interface
}
func (c *ResolutionV1beta1Client) ResolutionRequests(namespace string) ResolutionRequestInterface {
return newResolutionRequests(c, namespace)
}
// NewForConfig creates a new ResolutionV1beta1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*ResolutionV1beta1Client, error) {
	// Work on a copy so the caller's config is never mutated.
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}
	hc, err := rest.HTTPClientFor(&cfg)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&cfg, hc)
}
// NewForConfigAndClient creates a new ResolutionV1beta1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResolutionV1beta1Client, error) {
	// Copy the config before defaulting so the caller's value stays untouched.
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}
	rc, err := rest.RESTClientForConfigAndClient(&cfg, h)
	if err != nil {
		return nil, err
	}
	return &ResolutionV1beta1Client{restClient: rc}, nil
}
// NewForConfigOrDie creates a new ResolutionV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *ResolutionV1beta1Client {
	cl, err := NewForConfig(c)
	if err != nil {
		// Callers opt into panicking by choosing the OrDie variant.
		panic(err)
	}
	return cl
}
// New creates a new ResolutionV1beta1Client for the given RESTClient.
// Unlike NewForConfig, no config defaulting is performed; c is used as-is.
func New(c rest.Interface) *ResolutionV1beta1Client {
return &ResolutionV1beta1Client{c}
}
// setConfigDefaults fills in the group/version, API path, serializer and
// user agent required to talk to the resolution.tekton.dev/v1beta1 API.
func setConfigDefaults(config *rest.Config) error {
	groupVersion := resolutionv1beta1.SchemeGroupVersion
	config.GroupVersion = &groupVersion
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	// Only default the user agent when the caller did not set one.
	if len(config.UserAgent) == 0 {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *ResolutionV1beta1Client) RESTClient() rest.Interface {
// A nil receiver yields a nil RESTClient rather than panicking.
if c == nil {
return nil
}
return c.restClient
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
scheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// ResolutionRequestsGetter has a method to return a ResolutionRequestInterface.
// A group's client should implement this interface.
type ResolutionRequestsGetter interface {
ResolutionRequests(namespace string) ResolutionRequestInterface
}
// ResolutionRequestInterface has methods to work with ResolutionRequest resources.
type ResolutionRequestInterface interface {
Create(ctx context.Context, resolutionRequest *resolutionv1beta1.ResolutionRequest, opts v1.CreateOptions) (*resolutionv1beta1.ResolutionRequest, error)
Update(ctx context.Context, resolutionRequest *resolutionv1beta1.ResolutionRequest, opts v1.UpdateOptions) (*resolutionv1beta1.ResolutionRequest, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
UpdateStatus(ctx context.Context, resolutionRequest *resolutionv1beta1.ResolutionRequest, opts v1.UpdateOptions) (*resolutionv1beta1.ResolutionRequest, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*resolutionv1beta1.ResolutionRequest, error)
List(ctx context.Context, opts v1.ListOptions) (*resolutionv1beta1.ResolutionRequestList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resolutionv1beta1.ResolutionRequest, err error)
ResolutionRequestExpansion
}
// resolutionRequests implements ResolutionRequestInterface
// by embedding the generic typed client from k8s.io/client-go/gentype.
type resolutionRequests struct {
*gentype.ClientWithList[*resolutionv1beta1.ResolutionRequest, *resolutionv1beta1.ResolutionRequestList]
}
// newResolutionRequests returns a ResolutionRequests
// backed by the given group client and scoped to namespace.
func newResolutionRequests(c *ResolutionV1beta1Client, namespace string) *resolutionRequests {
return &resolutionRequests{
gentype.NewClientWithList[*resolutionv1beta1.ResolutionRequest, *resolutionv1beta1.ResolutionRequestList](
"resolutionrequests",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
// Factory functions give the generic client fresh zero values to decode into.
func() *resolutionv1beta1.ResolutionRequest { return &resolutionv1beta1.ResolutionRequest{} },
func() *resolutionv1beta1.ResolutionRequestList { return &resolutionv1beta1.ResolutionRequestList{} },
),
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces"
resolution "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
// sharedInformerFactory caches one shared informer per resource type and
// manages the lifecycle of the goroutines that run them.
type sharedInformerFactory struct {
// client is used by each informer's ListWatch.
client versioned.Interface
// namespace restricts all informers to a single namespace (or v1.NamespaceAll).
namespace string
// tweakListOptions, when non-nil, mutates list/watch options before every call.
tweakListOptions internalinterfaces.TweakListOptionsFunc
// lock guards informers, startedInformers and shuttingDown.
lock sync.Mutex
// defaultResync is used for informers without a customResync entry.
defaultResync time.Duration
// customResync holds per-type resync overrides, keyed by reflect.Type.
customResync map[reflect.Type]time.Duration
// transform, when non-nil, is applied to every object before it is stored.
transform cache.TransformFunc
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
// wg tracks how many goroutines were started.
wg sync.WaitGroup
// shuttingDown is true when Shutdown has been called. It may still be running
// because it needs to wait for goroutines.
shuttingDown bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
	return func(factory *sharedInformerFactory) *sharedInformerFactory {
		// Keyed by the dynamic type of each sample object.
		for obj, resync := range resyncConfig {
			factory.customResync[reflect.TypeOf(obj)] = resync
		}
		return factory
	}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.tweakListOptions = tweakListOptions
		return f
	}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.namespace = namespace
		return f
	}
}
// WithTransform sets a transform on all informers.
func WithTransform(transform cache.TransformFunc) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.transform = transform
		return f
	}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
	f := &sharedInformerFactory{
		client:           client,
		namespace:        v1.NamespaceAll,
		defaultResync:    defaultResync,
		informers:        map[reflect.Type]cache.SharedIndexInformer{},
		startedInformers: map[reflect.Type]bool{},
		customResync:     map[reflect.Type]time.Duration{},
	}
	// Each option may replace the factory wholesale, so thread it through.
	for _, apply := range options {
		f = apply(f)
	}
	return f
}
// Start runs every informer that has been requested but not yet started,
// one goroutine each. It is safe to call multiple times; already-started
// informers are skipped, and nothing starts after Shutdown has been called.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
// Refuse to launch new goroutines once a shutdown has begun.
if f.shuttingDown {
return
}
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
// Register with the WaitGroup before launching so Shutdown waits for it.
f.wg.Add(1)
// We need a new variable in each loop iteration,
// otherwise the goroutine would use the loop variable
// and that keeps changing.
informer := informer
go func() {
defer f.wg.Done()
informer.Run(stopCh)
}()
f.startedInformers[informerType] = true
}
}
}
// Shutdown marks the factory as shutting down and blocks until all informer
// goroutines have exited. The lock is deliberately released before Wait:
// holding it would deadlock with Start, which takes the same lock.
func (f *sharedInformerFactory) Shutdown() {
f.lock.Lock()
f.shuttingDown = true
f.lock.Unlock()
// Will return immediately if there is nothing to wait for.
f.wg.Wait()
}
// WaitForCacheSync waits for all started informers' caches to sync (or for
// stopCh to close) and reports, per informer type, whether the sync succeeded.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
	// Snapshot the started informers under the lock, then sync outside it
	// so we never block other factory operations while waiting.
	started := map[reflect.Type]cache.SharedIndexInformer{}
	f.lock.Lock()
	for t, inf := range f.informers {
		if f.startedInformers[t] {
			started[t] = inf
		}
	}
	f.lock.Unlock()

	result := make(map[reflect.Type]bool, len(started))
	for t, inf := range started {
		result[t] = cache.WaitForCacheSync(stopCh, inf.HasSynced)
	}
	return result
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client. The informer is created on first request for a type and cached
// for all subsequent calls.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	t := reflect.TypeOf(obj)
	if existing, ok := f.informers[t]; ok {
		return existing
	}

	// Fall back to the factory-wide resync period when no override is set.
	resync, ok := f.customResync[t]
	if !ok {
		resync = f.defaultResync
	}

	created := newFunc(f.client, resync)
	created.SetTransform(f.transform)
	f.informers[t] = created
	return created
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
//
// It is typically used like this:
//
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
// factory := NewSharedInformerFactory(client, resyncPeriod)
// defer factory.Shutdown() // Returns immediately if nothing was started.
// genericInformer := factory.ForResource(resource)
// typedInformer := factory.SomeAPIGroup().V1().SomeType()
// factory.Start(ctx.Done()) // Start processing these informers.
// synced := factory.WaitForCacheSync(ctx.Done())
// for v, ok := range synced {
// if !ok {
// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
// return
// }
// }
//
// // Informers can also be created after Start, but then
// // Start must be called again:
// anotherGenericInformer := factory.ForResource(resource)
// factory.Start(ctx.Done())
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
// Start initializes all requested informers. They are handled in goroutines
// which run until the stop channel gets closed.
// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
Start(stopCh <-chan struct{})
// Shutdown marks a factory as shutting down. At that point no new
// informers can be started anymore and Start will return without
// doing anything.
//
// In addition, Shutdown blocks until all goroutines have terminated. For that
// to happen, the close channel(s) that they were started with must be closed,
// either before Shutdown gets called or while it is waiting.
//
// Shutdown may be called multiple times, even concurrently. All such calls will
// block until all goroutines have terminated.
Shutdown()
// WaitForCacheSync blocks until all started informers' caches were synced
// or the stop channel gets closed.
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
// ForResource gives generic access to a shared informer of the matching type.
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
// Resolution provides access to informers for the resolution.tekton.dev group.
Resolution() resolution.Interface
}
// Resolution returns the informer group for resolution.tekton.dev,
// sharing this factory's namespace and list-option tweaks.
func (f *sharedInformerFactory) Resolution() resolution.Interface {
return resolution.New(f, f.namespace, f.tweakListOptions)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.GenericLister
}
// genericInformer pairs a shared informer with the GroupResource it serves,
// so a generic lister can be built on demand.
type genericInformer struct {
informer cache.SharedIndexInformer
resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
return f.informer
}
// Lister returns the GenericLister.
// A fresh lister is constructed per call over the informer's indexer.
func (f *genericInformer) Lister() cache.GenericLister {
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=resolution.tekton.dev, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("resolutionrequests"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resolution().V1alpha1().ResolutionRequests().Informer()}, nil
// Group=resolution.tekton.dev, Version=v1beta1
case v1beta1.SchemeGroupVersion.WithResource("resolutionrequests"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resolution().V1beta1().ResolutionRequests().Informer()}, nil
}
// Unknown resources are an error; there is no dynamic fallback yet.
return nil, fmt.Errorf("no informer found for %v", resource)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package resolution
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
// V1beta1 provides access to shared informers for resources in V1beta1.
V1beta1() v1beta1.Interface
}
// group carries the factory plus the namespace/list-option settings that
// every version-level informer in this group inherits.
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
// V1beta1 returns a new v1beta1.Interface.
func (g *group) V1beta1() v1beta1.Interface {
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// ResolutionRequests returns a ResolutionRequestInformer.
ResolutionRequests() ResolutionRequestInformer
}
// version carries the factory plus the settings every informer in this
// group version inherits.
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// ResolutionRequests returns a ResolutionRequestInformer.
func (v *version) ResolutionRequests() ResolutionRequestInformer {
return &resolutionRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
time "time"
apisresolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// ResolutionRequestInformer provides access to a shared informer and lister for
// ResolutionRequests.
type ResolutionRequestInformer interface {
Informer() cache.SharedIndexInformer
Lister() resolutionv1alpha1.ResolutionRequestLister
}
// resolutionRequestInformer is the factory-backed implementation of
// ResolutionRequestInformer for resolution.tekton.dev/v1alpha1.
type resolutionRequestInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewResolutionRequestInformer constructs a new informer for ResolutionRequest type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewResolutionRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredResolutionRequestInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredResolutionRequestInformer constructs a new informer for ResolutionRequest type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResolutionRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// applyTweaks lets the caller adjust the options before every LIST/WATCH.
	applyTweaks := func(options *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(options)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			applyTweaks(&options)
			return client.ResolutionV1alpha1().ResolutionRequests(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			applyTweaks(&options)
			return client.ResolutionV1alpha1().ResolutionRequests(namespace).Watch(context.TODO(), options)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apisresolutionv1alpha1.ResolutionRequest{}, resyncPeriod, indexers)
}
// defaultInformer builds the informer used when this type is requested via
// the shared factory, adding the standard namespace index.
func (f *resolutionRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredResolutionRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the shared informer for ResolutionRequest, creating it on first use.
func (f *resolutionRequestInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apisresolutionv1alpha1.ResolutionRequest{}, f.defaultInformer)
}
// Lister returns a lister backed by the shared informer's indexer.
func (f *resolutionRequestInformer) Lister() resolutionv1alpha1.ResolutionRequestLister {
return resolutionv1alpha1.NewResolutionRequestLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// ResolutionRequests returns a ResolutionRequestInformer.
ResolutionRequests() ResolutionRequestInformer
}
// version carries the factory plus the settings every informer in this
// group version inherits.
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// ResolutionRequests returns a ResolutionRequestInformer.
func (v *version) ResolutionRequests() ResolutionRequestInformer {
return &resolutionRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
time "time"
apisresolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// ResolutionRequestInformer provides access to a shared informer and lister for
// ResolutionRequests.
type ResolutionRequestInformer interface {
Informer() cache.SharedIndexInformer
Lister() resolutionv1beta1.ResolutionRequestLister
}
// resolutionRequestInformer is the factory-backed implementation of
// ResolutionRequestInformer for resolution.tekton.dev/v1beta1.
type resolutionRequestInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewResolutionRequestInformer constructs a new informer for ResolutionRequest type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewResolutionRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredResolutionRequestInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredResolutionRequestInformer constructs a new informer for ResolutionRequest type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResolutionRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// applyTweaks lets the caller adjust the options before every LIST/WATCH.
	applyTweaks := func(options *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(options)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			applyTweaks(&options)
			return client.ResolutionV1beta1().ResolutionRequests(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			applyTweaks(&options)
			return client.ResolutionV1beta1().ResolutionRequests(namespace).Watch(context.TODO(), options)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apisresolutionv1beta1.ResolutionRequest{}, resyncPeriod, indexers)
}
// defaultInformer builds the informer used when this type is requested via
// the shared factory, adding the standard namespace index.
func (f *resolutionRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredResolutionRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the shared informer for ResolutionRequest, creating it on first use.
func (f *resolutionRequestInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apisresolutionv1beta1.ResolutionRequest{}, f.defaultInformer)
}
// Lister returns a lister backed by the shared informer's indexer.
func (f *resolutionRequestInformer) Lister() resolutionv1beta1.ResolutionRequestLister {
return resolutionv1beta1.NewResolutionRequestLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package client
import (
context "context"
versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
rest "k8s.io/client-go/rest"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers the resolution clientset with the default injection chain so
// sharedmain wires it into the application context.
func init() {
injection.Default.RegisterClient(withClientFromConfig)
injection.Default.RegisterClientFetcher(func(ctx context.Context) interface{} {
return Get(ctx)
})
}
// Key is used as the key for associating information with a context.Context.
type Key struct{}
// withClientFromConfig stores a resolution clientset built from cfg on the context.
// NewForConfigOrDie panics on invalid config, by injection convention.
func withClientFromConfig(ctx context.Context, cfg *rest.Config) context.Context {
return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg))
}
// Get extracts the versioned.Interface client from the context.
// It panics (via the context logger) if no client was injected.
func Get(ctx context.Context) versioned.Interface {
untyped := ctx.Value(Key{})
if untyped == nil {
// Distinguish "wrong context" from "client never registered" for a clearer panic.
if injection.GetConfig(ctx) == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned.Interface from context. This context is not the application context (which is typically given to constructors via sharedmain).")
} else {
logging.FromContext(ctx).Panic(
"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned.Interface from context.")
}
}
return untyped.(versioned.Interface)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake"
client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
runtime "k8s.io/apimachinery/pkg/runtime"
rest "k8s.io/client-go/rest"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register the fake client and fetcher with the Fake injection context,
	// mirroring the real client registration for use in tests.
	injection.Fake.RegisterClient(withClient)
	injection.Fake.RegisterClientFetcher(func(ctx context.Context) interface{} {
		return Get(ctx)
	})
}

// withClient attaches a fake clientset to the context.
// The rest.Config is ignored; fakes need no connection details.
func withClient(ctx context.Context, cfg *rest.Config) context.Context {
	ctx, _ = With(ctx)
	return ctx
}

// With seeds a fake Clientset with the given objects and stores it under the
// real client's Key, so code that calls client.Get finds the fake. Both the
// derived context and the clientset are returned so tests can pre-load and
// later inspect actions.
func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) {
	cs := fake.NewSimpleClientset(objects...)
	return context.WithValue(ctx, client.Key{}, cs), cs
}

// Get extracts the Kubernetes client from the context.
// Panics via the context logger when no fake clientset was injected.
func Get(ctx context.Context) *fake.Clientset {
	untyped := ctx.Value(client.Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake.Clientset from context.")
	}
	return untyped.(*fake.Clientset)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package factory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions"
client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register the shared informer factory with the default injection context.
	injection.Default.RegisterInformerFactory(withInformerFactory)
}

// Key is used as the key for associating information with a context.Context.
type Key struct{}

// withInformerFactory constructs a SharedInformerFactory backed by the
// injected client and stores it under Key{}. When the context carries a
// namespace scope, the factory is restricted to that namespace.
func withInformerFactory(ctx context.Context) context.Context {
	c := client.Get(ctx)
	// At most one option (the namespace restriction) is ever appended.
	opts := make([]externalversions.SharedInformerOption, 0, 1)
	if injection.HasNamespaceScope(ctx) {
		opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
	}
	return context.WithValue(ctx, Key{},
		externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
}

// Get extracts the InformerFactory from the context.
// Panics via the context logger when no factory was injected.
func Get(ctx context.Context) externalversions.SharedInformerFactory {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions.SharedInformerFactory from context.")
	}
	return untyped.(externalversions.SharedInformerFactory)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions"
fake "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake"
factory "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get re-exports the real factory accessor; the fake factory is stored under
// the same factory.Key, so retrieval is identical.
var Get = factory.Get

func init() {
	// Register the fake-backed informer factory with the Fake injection context.
	injection.Fake.RegisterInformerFactory(withInformerFactory)
}

// withInformerFactory mirrors the real factory constructor but builds the
// SharedInformerFactory on top of the fake clientset from the context.
func withInformerFactory(ctx context.Context) context.Context {
	c := fake.Get(ctx)
	// At most one option (the namespace restriction) is ever appended.
	opts := make([]externalversions.SharedInformerOption, 0, 1)
	if injection.HasNamespaceScope(ctx) {
		opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
	}
	return context.WithValue(ctx, factory.Key{},
		externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fakeFilteredFactory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions"
fake "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake"
filtered "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/filtered"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get re-exports the real filtered-factory accessor; fake factories are stored
// under the same filtered.Key, so retrieval is identical.
var Get = filtered.Get

func init() {
	// Register the fake-backed filtered informer factory with the Fake
	// injection context.
	injection.Fake.RegisterInformerFactory(withInformerFactory)
}

// withInformerFactory builds one SharedInformerFactory per label selector
// previously stashed on the context via filtered.WithSelectors, each backed by
// the fake clientset and keyed by its selector string.
// Panics when no selectors were registered on the context.
func withInformerFactory(ctx context.Context) context.Context {
	c := fake.Get(ctx)
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	for _, selector := range labelSelectors {
		// Copy the loop variable so the tweak closure below captures this
		// iteration's value (pre-Go-1.22 loop-capture idiom).
		selectorVal := selector
		opts := []externalversions.SharedInformerOption{}
		if injection.HasNamespaceScope(ctx) {
			opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
		}
		// Restrict every list/watch issued by this factory to the selector.
		opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) {
			l.LabelSelector = selectorVal
		}))
		ctx = context.WithValue(ctx, filtered.Key{Selector: selectorVal},
			externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
	}
	return ctx
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filteredFactory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions"
client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register the filtered informer factory with the default injection context.
	injection.Default.RegisterInformerFactory(withInformerFactory)
}

// Key is used as the key for associating information with a context.Context.
// One factory is stored per label selector.
type Key struct {
	Selector string
}

// LabelKey is the context key under which the list of label selectors is
// stored (see WithSelectors).
type LabelKey struct{}

// WithSelectors records the label selectors for which filtered informer
// factories should be created. It must be called on the context before the
// injection machinery runs withInformerFactory.
func WithSelectors(ctx context.Context, selector ...string) context.Context {
	return context.WithValue(ctx, LabelKey{}, selector)
}

// withInformerFactory builds one SharedInformerFactory per registered label
// selector, each keyed by Key{Selector}. Panics when WithSelectors was never
// called on the context.
func withInformerFactory(ctx context.Context) context.Context {
	c := client.Get(ctx)
	untyped := ctx.Value(LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	for _, selector := range labelSelectors {
		// Copy the loop variable so the tweak closure below captures this
		// iteration's value (pre-Go-1.22 loop-capture idiom).
		selectorVal := selector
		opts := []externalversions.SharedInformerOption{}
		if injection.HasNamespaceScope(ctx) {
			opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
		}
		// Restrict every list/watch issued by this factory to the selector.
		opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) {
			l.LabelSelector = selectorVal
		}))
		ctx = context.WithValue(ctx, Key{Selector: selectorVal},
			externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
	}
	return ctx
}

// Get extracts the InformerFactory for the given selector from the context.
// Panics via the context logger when no factory exists for that selector.
func Get(ctx context.Context, selector string) externalversions.SharedInformerFactory {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions.SharedInformerFactory with selector %s from context.", selector)
	}
	return untyped.(externalversions.SharedInformerFactory)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/fake"
resolutionrequest "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get re-exports the real informer accessor; the fake informer is stored under
// the same resolutionrequest.Key, so retrieval is identical.
var Get = resolutionrequest.Get

func init() {
	// Register the fake-backed v1alpha1 ResolutionRequest informer with the
	// Fake injection context.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer obtains the v1alpha1 ResolutionRequest informer from the fake
// factory, stores the typed informer on the context, and returns the untyped
// informer for the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Resolution().V1alpha1().ResolutionRequests()
	return context.WithValue(ctx, resolutionrequest.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get re-exports the real filtered informer accessor; fake informers are
// stored under the same filtered.Key, so retrieval is identical.
var Get = filtered.Get

func init() {
	// Register the fake-backed filtered v1alpha1 ResolutionRequest informers
	// with the Fake injection context.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer creates one v1alpha1 ResolutionRequest informer per label
// selector registered on the context, each pulled from the matching filtered
// fake factory and stored under its selector key. Panics when no selectors
// were registered.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Resolution().V1alpha1().ResolutionRequests()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1"
filtered "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register the filtered v1alpha1 ResolutionRequest informers with the
	// default injection context.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
// One informer is stored per label selector.
type Key struct {
	Selector string
}

// withInformer creates one v1alpha1 ResolutionRequest informer per label
// selector registered on the context, each pulled from the matching filtered
// factory and stored under its selector key. Panics when no selectors were
// registered (see the filtered factory's WithSelectors).
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Resolution().V1alpha1().ResolutionRequests()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer for the given selector from the context.
// Panics via the context logger when no informer exists for that selector.
func Get(ctx context.Context, selector string) v1alpha1.ResolutionRequestInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1.ResolutionRequestInformer with selector %s from context.", selector)
	}
	return untyped.(v1alpha1.ResolutionRequestInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1"
factory "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register the v1alpha1 ResolutionRequest informer with the default
	// injection context.
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer obtains the v1alpha1 ResolutionRequest informer from the
// injected factory, stores the typed informer on the context, and returns the
// untyped informer for the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Resolution().V1alpha1().ResolutionRequests()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
// Panics via the context logger when the informer was never injected.
func Get(ctx context.Context) v1alpha1.ResolutionRequestInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1.ResolutionRequestInformer from context.")
	}
	return untyped.(v1alpha1.ResolutionRequestInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/fake"
resolutionrequest "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
// Get re-exports the real informer accessor; the fake informer is stored under
// the same resolutionrequest.Key, so retrieval is identical.
var Get = resolutionrequest.Get

func init() {
	// Register the fake-backed v1beta1 ResolutionRequest informer with the
	// Fake injection context.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer obtains the v1beta1 ResolutionRequest informer from the fake
// factory, stores the typed informer on the context, and returns the untyped
// informer for the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Resolution().V1beta1().ResolutionRequests()
	return context.WithValue(ctx, resolutionrequest.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// Get re-exports the real filtered informer accessor; fake informers are
// stored under the same filtered.Key, so retrieval is identical.
var Get = filtered.Get

func init() {
	// Register the fake-backed filtered v1beta1 ResolutionRequest informers
	// with the Fake injection context.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer creates one v1beta1 ResolutionRequest informer per label
// selector registered on the context, each pulled from the matching filtered
// fake factory and stored under its selector key. Panics when no selectors
// were registered.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Resolution().V1beta1().ResolutionRequests()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1"
filtered "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register the filtered v1beta1 ResolutionRequest informers with the
	// default injection context.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
// One informer is stored per label selector.
type Key struct {
	Selector string
}

// withInformer creates one v1beta1 ResolutionRequest informer per label
// selector registered on the context, each pulled from the matching filtered
// factory and stored under its selector key. Panics when no selectors were
// registered (see the filtered factory's WithSelectors).
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Resolution().V1beta1().ResolutionRequests()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer for the given selector from the context.
// Panics via the context logger when no informer exists for that selector.
func Get(ctx context.Context, selector string) v1beta1.ResolutionRequestInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1.ResolutionRequestInformer with selector %s from context.", selector)
	}
	return untyped.(v1beta1.ResolutionRequestInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
context "context"
v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1"
factory "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	// Register the v1beta1 ResolutionRequest informer with the default
	// injection context.
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer obtains the v1beta1 ResolutionRequest informer from the
// injected factory, stores the typed informer on the context, and returns the
// untyped informer for the injection framework to start.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Resolution().V1beta1().ResolutionRequests()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
// Panics via the context logger when the informer was never injected.
func Get(ctx context.Context) v1beta1.ResolutionRequestInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1.ResolutionRequestInformer from context.")
	}
	return untyped.(v1beta1.ResolutionRequestInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
resolutionrequest "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the event-recorder component name used
	// unless overridden via the AgentName controller option.
	defaultControllerAgentName = "resolutionrequest-controller"
	// defaultFinalizerName is the finalizer applied to ResolutionRequest
	// resources unless overridden via the FinalizerName controller option.
	defaultFinalizerName = "resolutionrequests.resolution.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)
	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}
	resolutionrequestInformer := resolutionrequest.Get(ctx)
	lister := resolutionrequestInformer.Lister()
	// promoteFilterFunc/promoteFunc may be replaced by the options below;
	// the defaults are "no filter" and a no-op promotion callback.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}
	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On leader promotion, re-enqueue every (optionally filtered)
			// ResolutionRequest from the lister into the newly-owned bucket.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}
	// Derive a stable, dot-separated type name for the work queue and logger
	// from the concrete reconciler implementation.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")
	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "resolution.tekton.dev.ResolutionRequest"),
	)
	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName
	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}
	// The recorder is created after the options loop so an AgentName override
	// takes effect.
	rec.Recorder = createRecorder(ctx, agentName)
	return impl
}
// createRecorder returns the event recorder from the context if one was
// already provided; otherwise it creates an event broadcaster that logs events
// and records them to the Kubernetes Events API, returning a recorder bound to
// the given agent name. The broadcaster's watches are stopped when ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)
	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			// Events("") records events in the namespace of each involved object.
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		// Tear down the logging/recording watches when the context is cancelled.
		go func() {
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}
	return recorder
}
// init registers the resolution clientset's API types into the global
// client-go scheme so the event recorder can resolve references to these
// kinds when emitting Events.
func init() {
	// The original ignored AddToScheme's error return. Registration failure
	// (e.g. a conflicting registration) is a programmer error that would
	// otherwise surface later as confusing recorder failures, so fail fast.
	if err := versionedscheme.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
context "context"
json "encoding/json"
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
v1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.ResolutionRequest.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1alpha1.ResolutionRequest. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1alpha1.ResolutionRequest) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1alpha1.ResolutionRequest.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1alpha1.ResolutionRequest. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1alpha1.ResolutionRequest) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.ResolutionRequest if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1alpha1.ResolutionRequest.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1alpha1.ResolutionRequest) reconciler.Event
}

// doReconcile is the common signature shared by ReconcileKind, FinalizeKind,
// and ObserveKind, letting the dispatcher treat them uniformly.
type doReconcile func(ctx context.Context, o *v1alpha1.ResolutionRequest) reconciler.Event

// reconcilerImpl implements controller.Reconciler for v1alpha1.ResolutionRequest resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister resolutionv1alpha1.ResolutionRequestLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1alpha1.ResolutionRequest
// that delegates business logic to r. At most one controller.Options value may
// be supplied to override the config store, finalizer name, status-update
// behavior, or demotion hook; passing more than one is fatal.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister resolutionv1alpha1.ResolutionRequestLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion, re-enqueue every ResolutionRequest so the new
			// leader reconciles the full set it is now responsible for.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply the optional overrides, last writer wins per field.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler. The key is a namespace/name
// string; invalid keys are logged and dropped (nil return) so they are not
// requeued. The method fetches the resource from the informer cache, runs the
// appropriate Reconcile/Finalize/Observe method, writes back status changes,
// and records any returned reconciler.Event.
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error is the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.ResolutionRequests(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		if !r.skipStatusUpdates {
			reconciler.PreProcessReconcile(ctx, resource)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

		if !r.skipStatusUpdates {
			reconciler.PostProcessReconcile(ctx, resource, original)
		}

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status to the API server, retrying on
// optimistic-concurrency conflicts via reconciler.RetryUpdateConflicts. The
// first attempt reuses the (possibly stale) informer copy; later attempts
// re-fetch the latest object before reapplying the status.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1alpha1.ResolutionRequest, desired *v1alpha1.ResolutionRequest) error {
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			getter := r.Client.ResolutionV1alpha1().ResolutionRequests(desired.Namespace)
			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute the (potentially expensive) diff when debug logging is enabled.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status

		updater := r.Client.ResolutionV1alpha1().ResolutionRequests(existing.Namespace)
		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
//
// Only the presence/absence of r.finalizerName is synchronized; other
// finalizers on the object are preserved. When nothing changes the input
// resource is returned untouched and no API call is made.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.ResolutionRequest, desiredFinalizers sets.Set[string]) (*v1alpha1.ResolutionRequest, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Including resourceVersion makes the merge patch conditional: it fails
	// with a conflict if the object changed since we read it.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.ResolutionV1alpha1().ResolutionRequests(resource.Namespace)
	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on resource when
// the underlying reconciler implements Finalizer and the resource is not being
// deleted; otherwise it returns resource unchanged without an API call.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.ResolutionRequest) (*v1alpha1.ResolutionRequest, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	// If this resource is not being deleted, mark the finalizer.
	if resource.GetDeletionTimestamp().IsZero() {
		finalizers.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
// clearFinalizer removes r.finalizerName from a resource that is being deleted
// when finalization completed cleanly: reconcileEvent is nil or a Normal-type
// reconciler event. Warning events and plain errors keep the finalizer in
// place so finalization is retried.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.ResolutionRequest, reconcileEvent reconciler.Event) (*v1alpha1.ResolutionRequest, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		return resource, nil
	}

	finalizers := sets.New[string](resource.Finalizers...)

	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			if event.EventType == v1.EventTypeNormal {
				finalizers.Delete(r.finalizerName)
			}
		}
	} else {
		finalizers.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, finalizers)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
type state struct {
	// key is the original reconciliation key from the queue.
	key string
	// namespace is the namespace split from the reconciliation key.
	namespace string
	// name is the name split from the reconciliation key.
	name string
	// reconciler is the user-supplied business-logic implementation.
	reconciler Interface
	// roi is the read only interface cast of the reconciler.
	roi ReadOnlyInterface
	// isROI (Read Only Interface) the reconciler only observes reconciliation.
	isROI bool
	// isLeader the instance of the reconciler is the elected leader.
	isLeader bool
}
// newState builds the per-run reconciliation state for key: it splits the
// namespace/name, detects whether the reconciler implements ReadOnlyInterface,
// and records whether this replica currently leads the key's bucket.
// It returns an error only when the key is not a valid namespace/name pair.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	roi, isROI := r.reconciler.(ReadOnlyInterface)

	isLeader := r.IsLeaderFor(types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	})

	return &state{
		key:        key,
		namespace:  namespace,
		name:       name,
		reconciler: r.reconciler,
		roi:        roi,
		isROI:      isROI,
		isLeader:   isLeader,
	}, nil
}
// isNotLeaderNorObserver reports whether this reconciler has no possible work
// for the current key: it is neither the elected leader nor a read-only
// observer, so the caller should take the fast-path out.
func (s *state) isNotLeaderNorObserver() bool {
	// Work exists only for the leader or for a ReadOnlyInterface implementor.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor resolves which reconciliation method applies to o:
//   - live resource + leader                  -> ReconcileKind
//   - live resource + read-only observer      -> ObserveKind
//   - deleting resource + leader + Finalizer  -> FinalizeKind
// It returns "unknown" and a nil doReconcile when no method applies.
func (s *state) reconcileMethodFor(o *v1alpha1.ResolutionRequest) (string, doReconcile) {
	if o.GetDeletionTimestamp().IsZero() {
		if s.isLeader {
			return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
		} else if s.isROI {
			return reconciler.DoObserveKind, s.roi.ObserveKind
		}
	} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
		return reconciler.DoFinalizeKind, fin.FinalizeKind
	}
	return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme"
client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
resolutionrequest "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
	// defaultControllerAgentName is the event-recorder component name used
	// unless controller.Options.AgentName overrides it.
	defaultControllerAgentName = "resolutionrequest-controller"
	// defaultFinalizerName is the finalizer managed by this reconciler
	// unless controller.Options.FinalizerName overrides it.
	defaultFinalizerName = "resolutionrequests.resolution.tekton.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
// At most one OptionsFn may be supplied; passing more than one is fatal.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	resolutionrequestInformer := resolutionrequest.Get(ctx)

	lister := resolutionrequestInformer.Lister()

	// promoteFilterFunc optionally narrows which resources are re-enqueued on
	// promotion; promoteFunc is a no-op unless overridden via options below.
	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				// Signal promotion event
				promoteFunc(bkt)

				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive the work-queue/logger name from the concrete reconciler type.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "resolution.tekton.dev.ResolutionRequest"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	// The recorder is created last so options can override the agent name.
	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}
// createRecorder returns the event recorder injected into ctx if one exists;
// otherwise it builds a broadcaster-backed recorder that logs events and sinks
// them to the Kubernetes Events API, stopping the watches when ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		// Stop the broadcaster's watches when the context is cancelled so the
		// goroutines backing them are not leaked.
		go func() {
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}

	return recorder
}
func init() {
	// Register the resolution API types with the shared client-go scheme so
	// the event recorder can reference them.
	// NOTE(review): the AddToScheme error is discarded (generated code);
	// a registration failure would surface later via the recorder.
	versionedscheme.AddToScheme(scheme.Scheme)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
context "context"
json "encoding/json"
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
zap "go.uber.org/zap"
zapcore "go.uber.org/zap/zapcore"
v1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.ResolutionRequest.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1beta1.ResolutionRequest. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1beta1.ResolutionRequest) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1beta1.ResolutionRequest.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1beta1.ResolutionRequest. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1beta1.ResolutionRequest) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1beta1.ResolutionRequest if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1beta1.ResolutionRequest.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1beta1.ResolutionRequest) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, FinalizeKind and
// ObserveKind so Reconcile can dispatch to whichever one applies.
type doReconcile func(ctx context.Context, o *v1beta1.ResolutionRequest) reconciler.Event

// reconcilerImpl implements controller.Reconciler for v1beta1.ResolutionRequest resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister resolutionv1beta1.ResolutionRequestLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1beta1.ResolutionRequest
// that delegates business logic to r. At most one controller.Options value may
// be supplied to override the config store, finalizer name, status-update
// behavior, or demotion hook; passing more than one is fatal.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister resolutionv1beta1.ResolutionRequestLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatal("Up to one options struct is supported, found: ", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion, re-enqueue every ResolutionRequest so the new
			// leader reconciles the full set it is now responsible for.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Apply the optional overrides, last writer wins per field.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler. The key is a namespace/name
// string; invalid keys are logged and dropped (nil return) so they are not
// requeued. The method fetches the resource from the informer cache, runs the
// appropriate Reconcile/Finalize/Observe method, writes back status changes,
// and records any returned reconciler.Event.
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error is the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.ResolutionRequests(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		if !r.skipStatusUpdates {
			reconciler.PreProcessReconcile(ctx, resource)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

		if !r.skipStatusUpdates {
			reconciler.PostProcessReconcile(ctx, resource, original)
		}

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, logger, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		if controller.IsSkipKey(reconcileEvent) {
			// This is a wrapped error, don't emit an event.
		} else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
			// This is a wrapped error, don't emit an event.
		} else {
			logger.Errorw("Returned an error", zap.Error(reconcileEvent))
			r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		}
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status to the API server, retrying on
// optimistic-concurrency conflicts via reconciler.RetryUpdateConflicts. The
// first attempt reuses the (possibly stale) informer copy; later attempts
// re-fetch the latest object before reapplying the status.
func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1beta1.ResolutionRequest, desired *v1beta1.ResolutionRequest) error {
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			getter := r.Client.ResolutionV1beta1().ResolutionRequests(desired.Namespace)
			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Only compute the (potentially expensive) diff when debug logging is enabled.
		if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
			if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
				logger.Debug("Updating status with: ", diff)
			}
		}

		existing.Status = desired.Status

		updater := r.Client.ResolutionV1beta1().ResolutionRequests(existing.Namespace)
		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
//
// Only the presence/absence of r.finalizerName is synchronized; other
// finalizers on the object are preserved. When nothing changes the input
// resource is returned untouched and no API call is made.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.ResolutionRequest, desiredFinalizers sets.Set[string]) (*v1beta1.ResolutionRequest, error) {
	// Don't modify the informers copy.
	existing := resource.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.New[string](existing.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = sets.List(existingFinalizers)
	}

	// Including resourceVersion makes the merge patch conditional: it fails
	// with a conflict if the object changed since we read it.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.ResolutionV1beta1().ResolutionRequests(resource.Namespace)
	resourceName := resource.Name
	updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return updated, err
}
// setFinalizerIfFinalizer ensures the resource carries this reconciler's
// finalizer whenever the wrapped reconciler implements Finalizer and the
// resource is not being deleted. It delegates the actual add/remove decision
// and the API update to updateFinalizersFiltered.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.ResolutionRequest) (*v1beta1.ResolutionRequest, error) {
	_, implementsFinalizer := r.reconciler.(Finalizer)
	if !implementsFinalizer {
		// No finalization logic exists, so there is nothing to manage.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)
	if resource.GetDeletionTimestamp().IsZero() {
		// The resource is live; it must carry our finalizer.
		desired.Insert(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
// clearFinalizer removes this reconciler's finalizer from a resource that is
// being deleted, but only when finalization is considered successful: either
// reconcileEvent is nil, or it unwraps to a ReconcilerEvent of type Normal.
// Resources that are not being deleted, or reconcilers without a Finalizer
// implementation, are returned unchanged.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.ResolutionRequest, reconcileEvent reconciler.Event) (*v1beta1.ResolutionRequest, error) {
	if _, implementsFinalizer := r.reconciler.(Finalizer); !implementsFinalizer {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		// Not being deleted; leave the finalizers untouched.
		return resource, nil
	}

	desired := sets.New[string](resource.Finalizers...)

	// Decide whether finalization succeeded and the finalizer may be dropped.
	removeFinalizer := false
	if reconcileEvent == nil {
		removeFinalizer = true
	} else {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) && event.EventType == v1.EventTypeNormal {
			removeFinalizer = true
		}
	}
	if removeFinalizer {
		desired.Delete(r.finalizerName)
	}

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource, desired)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package resolutionrequest
import (
fmt "fmt"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
type state struct {
// key is the original reconciliation key from the queue.
key string
// namespace is the namespace split from the reconciliation key.
namespace string
// name is the name split from the reconciliation key.
name string
// reconciler is the reconciler.
reconciler Interface
// roi is the read only interface cast of the reconciler.
// Only meaningful when isROI is true.
roi ReadOnlyInterface
// isROI (Read Only Interface) the reconciler only observes reconciliation.
isROI bool
// isLeader the instance of the reconciler is the elected leader.
isLeader bool
}
// newState builds the per-run bookkeeping for the given queue key: it splits
// the key into namespace and name, detects whether the reconciler is
// read-only, and records whether this instance is the elected leader for the
// key. It returns an error when the key is not a valid namespace/name pair.
func newState(key string, r *reconcilerImpl) (*state, error) {
	// Convert the namespace/name string into a distinct namespace and name.
	ns, n, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, fmt.Errorf("invalid resource key: %s", key)
	}

	readOnly, implementsReadOnly := r.reconciler.(ReadOnlyInterface)

	return &state{
		key:        key,
		namespace:  ns,
		name:       n,
		reconciler: r.reconciler,
		roi:        readOnly,
		isROI:      implementsReadOnly,
		isLeader: r.IsLeaderFor(types.NamespacedName{
			Namespace: ns,
			Name:      n,
		}),
	}, nil
}
// isNotLeaderNorObserver checks to see if this reconciler with the current
// state is enabled to do any work or not.
// isNotLeaderNorObserver returns true when there is no work possible for the
// reconciler.
func (s *state) isNotLeaderNorObserver() bool {
	// Not the elected leader and no ReadOnly interface: nothing can be done.
	return !s.isLeader && !s.isROI
}
// reconcileMethodFor selects which reconciliation method applies to o given
// the current leader/observer state, returning the method's name (one of the
// reconciler.Do* constants) and the function to invoke. It returns
// ("unknown", nil) when no work is possible for this instance.
func (s *state) reconcileMethodFor(o *v1beta1.ResolutionRequest) (string, doReconcile) {
if o.GetDeletionTimestamp().IsZero() {
// Live resource: the leader reconciles; read-only instances observe.
if s.isLeader {
return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
} else if s.isROI {
return reconciler.DoObserveKind, s.roi.ObserveKind
}
} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
// Deleting resource: only the leader finalizes, and only when the
// reconciler implements Finalizer.
return reconciler.DoFinalizeKind, fin.FinalizeKind
}
return "unknown", nil
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// ResolutionRequestLister helps list ResolutionRequests.
// All objects returned here must be treated as read-only.
type ResolutionRequestLister interface {
// List lists all ResolutionRequests in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*resolutionv1alpha1.ResolutionRequest, err error)
// ResolutionRequests returns an object that can list and get ResolutionRequests.
ResolutionRequests(namespace string) ResolutionRequestNamespaceLister
ResolutionRequestListerExpansion
}
// resolutionRequestLister implements the ResolutionRequestLister interface.
// List is supplied by the embedded generic ResourceIndexer.
type resolutionRequestLister struct {
listers.ResourceIndexer[*resolutionv1alpha1.ResolutionRequest]
}
// NewResolutionRequestLister returns a new ResolutionRequestLister.
func NewResolutionRequestLister(indexer cache.Indexer) ResolutionRequestLister {
return &resolutionRequestLister{listers.New[*resolutionv1alpha1.ResolutionRequest](indexer, resolutionv1alpha1.Resource("resolutionrequest"))}
}
// ResolutionRequests returns an object that can list and get ResolutionRequests.
func (s *resolutionRequestLister) ResolutionRequests(namespace string) ResolutionRequestNamespaceLister {
return resolutionRequestNamespaceLister{listers.NewNamespaced[*resolutionv1alpha1.ResolutionRequest](s.ResourceIndexer, namespace)}
}
// ResolutionRequestNamespaceLister helps list and get ResolutionRequests.
// All objects returned here must be treated as read-only.
type ResolutionRequestNamespaceLister interface {
// List lists all ResolutionRequests in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*resolutionv1alpha1.ResolutionRequest, err error)
// Get retrieves the ResolutionRequest from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*resolutionv1alpha1.ResolutionRequest, error)
ResolutionRequestNamespaceListerExpansion
}
// resolutionRequestNamespaceLister implements the ResolutionRequestNamespaceLister
// interface.
// List and Get are supplied by the embedded namespaced generic ResourceIndexer.
type resolutionRequestNamespaceLister struct {
listers.ResourceIndexer[*resolutionv1alpha1.ResolutionRequest]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// ResolutionRequestLister helps list ResolutionRequests.
// All objects returned here must be treated as read-only.
type ResolutionRequestLister interface {
// List lists all ResolutionRequests in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*resolutionv1beta1.ResolutionRequest, err error)
// ResolutionRequests returns an object that can list and get ResolutionRequests.
ResolutionRequests(namespace string) ResolutionRequestNamespaceLister
ResolutionRequestListerExpansion
}
// resolutionRequestLister implements the ResolutionRequestLister interface.
// List is supplied by the embedded generic ResourceIndexer.
type resolutionRequestLister struct {
listers.ResourceIndexer[*resolutionv1beta1.ResolutionRequest]
}
// NewResolutionRequestLister returns a new ResolutionRequestLister.
func NewResolutionRequestLister(indexer cache.Indexer) ResolutionRequestLister {
return &resolutionRequestLister{listers.New[*resolutionv1beta1.ResolutionRequest](indexer, resolutionv1beta1.Resource("resolutionrequest"))}
}
// ResolutionRequests returns an object that can list and get ResolutionRequests.
func (s *resolutionRequestLister) ResolutionRequests(namespace string) ResolutionRequestNamespaceLister {
return resolutionRequestNamespaceLister{listers.NewNamespaced[*resolutionv1beta1.ResolutionRequest](s.ResourceIndexer, namespace)}
}
// ResolutionRequestNamespaceLister helps list and get ResolutionRequests.
// All objects returned here must be treated as read-only.
type ResolutionRequestNamespaceLister interface {
// List lists all ResolutionRequests in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*resolutionv1beta1.ResolutionRequest, err error)
// Get retrieves the ResolutionRequest from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*resolutionv1beta1.ResolutionRequest, error)
ResolutionRequestNamespaceListerExpansion
}
// resolutionRequestNamespaceLister implements the ResolutionRequestNamespaceLister
// interface.
// List and Get are supplied by the embedded namespaced generic ResourceIndexer.
type resolutionRequestNamespaceLister struct {
listers.ResourceIndexer[*resolutionv1beta1.ResolutionRequest]
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
fmt "fmt"
http "net/http"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
// Interface is the set of typed clients this Clientset provides.
type Interface interface {
Discovery() discovery.DiscoveryInterface
TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface
}
// Clientset contains the clients for groups.
type Clientset struct {
*discovery.DiscoveryClient
tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client
}
// TektonV1alpha1 retrieves the TektonV1alpha1Client
func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface {
return c.tektonV1alpha1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
// Guard against a nil receiver so callers can chain safely.
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
	cfg := *c
	if cfg.UserAgent == "" {
		cfg.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	// Build a single HTTP client up front so every typed client created from
	// this config shares one transport.
	client, err := rest.HTTPClientFor(&cfg)
	if err != nil {
		return nil, err
	}

	return NewForConfigAndClient(&cfg, client)
}
// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
// QPS without a positive Burst cannot configure a token bucket; fail fast.
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
// Each typed client and the discovery client share the provided httpClient.
cs.tektonV1alpha1, err = tektonv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	clientset, err := NewForConfig(c)
	if err != nil {
		// Intended for initialization paths where a bad config is fatal.
		panic(err)
	}
	return clientset
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	// All group clients and discovery share the one RESTClient supplied here.
	return &Clientset{
		DiscoveryClient: discovery.NewDiscoveryClient(c),
		tektonV1alpha1:  tektonv1alpha1.New(c),
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1"
faketektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
//
// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
// via --with-applyconfig).
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
// Seed the tracker with the caller-provided objects; any add failure is a
// programming error in the test, so panic rather than return it.
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{tracker: o}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
// Route every verb on every resource to the tracker.
cs.AddReactor("*", "*", testing.ObjectReaction(o))
// Serve Watch requests from the tracker's watch support as well.
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return cs
}
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
tracker testing.ObjectTracker
}
// Discovery returns the fake discovery client wired up by NewSimpleClientset.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
// Tracker exposes the underlying object tracker so tests can seed or inspect state.
func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}
// Compile-time checks that Clientset satisfies the expected interfaces.
var (
_ clientset.Interface = &Clientset{}
_ testing.FakeClient = &Clientset{}
)
// TektonV1alpha1 retrieves the TektonV1alpha1Client
func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface {
return &faketektonv1alpha1.FakeTektonV1alpha1{Fake: &c.Fake}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// scheme holds all types known to this fake clientset; codecs decodes
// objects (e.g. for the object tracker) using that scheme.
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
// localSchemeBuilder lists the AddToScheme function of every group/version
// served by this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
tektonv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
// init registers the meta/v1 group and all local types into the package scheme.
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(scheme))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// Scheme holds all types known to this clientset; Codecs and ParameterCodec
// serialize bodies and query parameters against it.
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
// localSchemeBuilder lists the AddToScheme function of every group/version
// served by this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
tektonv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
// init registers the meta/v1 group and all local types into the package Scheme.
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(Scheme))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1"
gentype "k8s.io/client-go/gentype"
)
// fakePipelineResources implements PipelineResourceInterface
// All CRUD verbs come from the embedded generic FakeClientWithList.
type fakePipelineResources struct {
*gentype.FakeClientWithList[*v1alpha1.PipelineResource, *v1alpha1.PipelineResourceList]
Fake *FakeTektonV1alpha1
}
// newFakePipelineResources wires the generic fake client to the shared
// testing.Fake action recorder for the given namespace.
func newFakePipelineResources(fake *FakeTektonV1alpha1, namespace string) resourcev1alpha1.PipelineResourceInterface {
return &fakePipelineResources{
gentype.NewFakeClientWithList[*v1alpha1.PipelineResource, *v1alpha1.PipelineResourceList](
fake.Fake,
namespace,
v1alpha1.SchemeGroupVersion.WithResource("pipelineresources"),
v1alpha1.SchemeGroupVersion.WithKind("PipelineResource"),
func() *v1alpha1.PipelineResource { return &v1alpha1.PipelineResource{} },
func() *v1alpha1.PipelineResourceList { return &v1alpha1.PipelineResourceList{} },
func(dst, src *v1alpha1.PipelineResourceList) { dst.ListMeta = src.ListMeta },
func(list *v1alpha1.PipelineResourceList) []*v1alpha1.PipelineResource {
return gentype.ToPointerSlice(list.Items)
},
func(list *v1alpha1.PipelineResourceList, items []*v1alpha1.PipelineResource) {
list.Items = gentype.FromPointerSlice(items)
},
),
fake,
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeTektonV1alpha1 implements TektonV1alpha1Interface on top of a
// testing.Fake action recorder instead of a real API server.
type FakeTektonV1alpha1 struct {
*testing.Fake
}
// PipelineResources returns a fake client for PipelineResources in the given namespace.
func (c *FakeTektonV1alpha1) PipelineResources(namespace string) v1alpha1.PipelineResourceInterface {
return newFakePipelineResources(c, namespace)
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// Fakes never issue REST calls, so this returns a nil *rest.RESTClient
// (note: the rest.Interface it is returned as then holds a typed nil).
func (c *FakeTektonV1alpha1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// PipelineResourcesGetter has a method to return a PipelineResourceInterface.
// A group's client should implement this interface.
type PipelineResourcesGetter interface {
PipelineResources(namespace string) PipelineResourceInterface
}
// PipelineResourceInterface has methods to work with PipelineResource resources.
type PipelineResourceInterface interface {
Create(ctx context.Context, pipelineResource *resourcev1alpha1.PipelineResource, opts v1.CreateOptions) (*resourcev1alpha1.PipelineResource, error)
Update(ctx context.Context, pipelineResource *resourcev1alpha1.PipelineResource, opts v1.UpdateOptions) (*resourcev1alpha1.PipelineResource, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha1.PipelineResource, error)
List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha1.PipelineResourceList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha1.PipelineResource, err error)
PipelineResourceExpansion
}
// pipelineResources implements PipelineResourceInterface
// All verbs are provided by the embedded generic ClientWithList.
type pipelineResources struct {
*gentype.ClientWithList[*resourcev1alpha1.PipelineResource, *resourcev1alpha1.PipelineResourceList]
}
// newPipelineResources returns a PipelineResources
func newPipelineResources(c *TektonV1alpha1Client, namespace string) *pipelineResources {
return &pipelineResources{
gentype.NewClientWithList[*resourcev1alpha1.PipelineResource, *resourcev1alpha1.PipelineResourceList](
"pipelineresources",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
func() *resourcev1alpha1.PipelineResource { return &resourcev1alpha1.PipelineResource{} },
func() *resourcev1alpha1.PipelineResourceList { return &resourcev1alpha1.PipelineResourceList{} },
),
}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
http "net/http"
resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
scheme "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// TektonV1alpha1Interface exposes the typed clients for the tekton.dev/v1alpha1 API group.
type TektonV1alpha1Interface interface {
RESTClient() rest.Interface
PipelineResourcesGetter
}
// TektonV1alpha1Client is used to interact with features provided by the tekton.dev group.
type TektonV1alpha1Client struct {
restClient rest.Interface
}
// PipelineResources returns a client for PipelineResource objects in the given namespace.
func (c *TektonV1alpha1Client) PipelineResources(namespace string) PipelineResourceInterface {
return newPipelineResources(c, namespace)
}
// NewForConfig creates a new TektonV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*TektonV1alpha1Client, error) {
	// Work on a copy so the caller's config is never mutated.
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}

	hc, err := rest.HTTPClientFor(&cfg)
	if err != nil {
		return nil, err
	}

	return NewForConfigAndClient(&cfg, hc)
}
// NewForConfigAndClient creates a new TektonV1alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*TektonV1alpha1Client, error) {
	// Default group/version, API path, serializer, and user agent on a copy.
	cfg := *c
	if err := setConfigDefaults(&cfg); err != nil {
		return nil, err
	}

	restClient, err := rest.RESTClientForConfigAndClient(&cfg, h)
	if err != nil {
		return nil, err
	}

	return &TektonV1alpha1Client{restClient: restClient}, nil
}
// NewForConfigOrDie creates a new TektonV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *TektonV1alpha1Client {
	cl, err := NewForConfig(c)
	if err != nil {
		// Intended for initialization paths where a bad config is fatal.
		panic(err)
	}
	return cl
}

// New creates a new TektonV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *TektonV1alpha1Client {
	// The caller's RESTClient is used as-is; no defaulting happens here.
	return &TektonV1alpha1Client{restClient: c}
}
// setConfigDefaults fills in the group/version, API path, negotiated
// serializer, and (if unset) user agent needed by a REST client for the
// tekton.dev/v1alpha1 group. Callers pass a copy, so the original config
// provided by the user is not mutated.
func setConfigDefaults(config *rest.Config) error {
gv := resourcev1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// A nil receiver yields a nil interface so callers can chain safely.
func (c *TektonV1alpha1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
versioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/internalinterfaces"
resource "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
// sharedInformerFactory caches one shared informer per object type and the
// settings used to construct them. Access is guarded by lock.
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
// customResync overrides defaultResync for specific object types.
customResync map[reflect.Type]time.Duration
transform cache.TransformFunc
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
// wg tracks how many goroutines were started.
wg sync.WaitGroup
// shuttingDown is true when Shutdown has been called. It may still be running
// because it needs to wait for goroutines.
shuttingDown bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		// Key the overrides by the concrete Go type of each sample object.
		for obj, period := range resyncConfig {
			f.customResync[reflect.TypeOf(obj)] = period
		}
		return f
	}
}

// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.tweakListOptions = tweakListOptions
		return f
	}
}

// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.namespace = namespace
		return f
	}
}

// WithTransform sets a transform on all informers.
func WithTransform(transform cache.TransformFunc) SharedInformerOption {
	return func(f *sharedInformerFactory) *sharedInformerFactory {
		f.transform = transform
		return f
	}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
	return NewSharedInformerFactoryWithOptions(client, defaultResync)
}

// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
//
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
	return NewSharedInformerFactoryWithOptions(client, defaultResync,
		WithNamespace(namespace),
		WithTweakListOptions(tweakListOptions))
}

// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
	f := &sharedInformerFactory{
		client:           client,
		namespace:        v1.NamespaceAll,
		defaultResync:    defaultResync,
		informers:        map[reflect.Type]cache.SharedIndexInformer{},
		startedInformers: map[reflect.Type]bool{},
		customResync:     map[reflect.Type]time.Duration{},
	}

	// Apply each functional option in order; an option may return a
	// replacement factory.
	for _, opt := range options {
		f = opt(f)
	}

	return f
}
// Start runs every informer that has been requested but not yet started, each
// in its own goroutine tracked by f.wg. It is safe to call repeatedly: already
// started informers are skipped, and it is a no-op after Shutdown.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
	f.lock.Lock()
	defer f.lock.Unlock()

	// Once Shutdown has been called, no new informer goroutines may start.
	if f.shuttingDown {
		return
	}

	for informerType, informer := range f.informers {
		if !f.startedInformers[informerType] {
			f.wg.Add(1)
			// We need a new variable in each loop iteration,
			// otherwise the goroutine would use the loop variable
			// and that keeps changing.
			informer := informer
			go func() {
				defer f.wg.Done()
				informer.Run(stopCh)
			}()
			f.startedInformers[informerType] = true
		}
	}
}
// Shutdown marks the factory as shutting down and blocks until all informer
// goroutines started by Start have exited. The goroutines only exit once their
// stop channel is closed, so callers must arrange for that.
func (f *sharedInformerFactory) Shutdown() {
	// Hold the lock only to flip the flag; waiting while locked would block
	// concurrent Start/InformerFor callers (and their goroutines) forever.
	f.lock.Lock()
	f.shuttingDown = true
	f.lock.Unlock()

	// Will return immediately if there is nothing to wait for.
	f.wg.Wait()
}
// WaitForCacheSync waits for each started informer's cache to sync (or for
// stopCh to close) and reports, per informer type, whether the sync succeeded.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
	// Snapshot the started informers under the lock, then wait without
	// holding it so other factory operations are not blocked.
	f.lock.Lock()
	started := make(map[reflect.Type]cache.SharedIndexInformer, len(f.informers))
	for t, inf := range f.informers {
		if f.startedInformers[t] {
			started[t] = inf
		}
	}
	f.lock.Unlock()

	synced := make(map[reflect.Type]bool, len(started))
	for t, inf := range started {
		synced[t] = cache.WaitForCacheSync(stopCh, inf.HasSynced)
	}
	return synced
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client. The informer is created on first request via newFunc and cached by
// the object's dynamic type; subsequent calls return the same instance.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(obj)
	informer, exists := f.informers[informerType]
	if exists {
		return informer
	}

	// A per-type resync period (WithCustomResyncConfig) takes precedence
	// over the factory default.
	resyncPeriod, exists := f.customResync[informerType]
	if !exists {
		resyncPeriod = f.defaultResync
	}

	informer = newFunc(f.client, resyncPeriod)
	// NOTE(review): SetTransform's error is discarded here; presumably it can
	// only fail on an already-started informer, which this one is not — confirm
	// against the client-go version in use.
	informer.SetTransform(f.transform)
	f.informers[informerType] = informer

	return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
//
// It is typically used like this:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	factory := NewSharedInformerFactory(client, resyncPeriod)
//	defer factory.Shutdown() // Returns immediately if nothing was started.
//	genericInformer := factory.ForResource(resource)
//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
//	factory.Start(ctx.Done()) // Start processing these informers.
//	synced := factory.WaitForCacheSync(ctx.Done())
//	for v, ok := range synced {
//	    if !ok {
//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
//	        return
//	    }
//	}
//
//	// Informers can also be created after Start, but then
//	// Start must be called again:
//	anotherGenericInformer := factory.ForResource(resource)
//	factory.Start(ctx.Done())
type SharedInformerFactory interface {
	internalinterfaces.SharedInformerFactory

	// Start initializes all requested informers. They are handled in goroutines
	// which run until the stop channel gets closed.
	// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
	Start(stopCh <-chan struct{})

	// Shutdown marks a factory as shutting down. At that point no new
	// informers can be started anymore and Start will return without
	// doing anything.
	//
	// In addition, Shutdown blocks until all goroutines have terminated. For that
	// to happen, the close channel(s) that they were started with must be closed,
	// either before Shutdown gets called or while it is waiting.
	//
	// Shutdown may be called multiple times, even concurrently. All such calls will
	// block until all goroutines have terminated.
	Shutdown()

	// WaitForCacheSync blocks until all started informers' caches were synced
	// or the stop channel gets closed.
	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool

	// ForResource gives generic access to a shared informer of the matching type.
	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)

	// InformerFor returns the SharedIndexInformer for obj using an internal
	// client.
	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer

	// Tekton gives access to informers for the tekton.dev API group.
	Tekton() resource.Interface
}

// Tekton returns the tekton.dev group's informer interface, scoped to this
// factory's namespace and list-option tweaks.
func (f *sharedInformerFactory) Tekton() resource.Interface {
	return resource.New(f, f.namespace, f.tweakListOptions)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
fmt "fmt"
v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() cache.GenericLister
}

// genericInformer pairs a concrete shared informer with the GroupResource it
// serves, so a generic (untyped) lister can be built over its indexer.
type genericInformer struct {
	informer cache.SharedIndexInformer
	resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
	return f.informer
}

// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
	indexer := f.Informer().GetIndexer()
	return cache.NewGenericLister(indexer, f.resource)
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
	// Group=tekton.dev, Version=v1alpha1
	if resource == v1alpha1.SchemeGroupVersion.WithResource("pipelineresources") {
		inf := f.Tekton().V1alpha1().PipelineResources().Informer()
		return &genericInformer{resource: resource.GroupResource(), informer: inf}, nil
	}

	return nil, fmt.Errorf("no informer found for %v", resource)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package resource
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
	// V1alpha1 provides access to shared informers for resources in V1alpha1.
	V1alpha1() v1alpha1.Interface
}

// group fans out to per-version informer interfaces, forwarding the factory,
// namespace scope, and list-option tweaks.
type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	g := &group{
		factory:          f,
		namespace:        namespace,
		tweakListOptions: tweakListOptions,
	}
	return g
}

// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
	// PipelineResources returns a PipelineResourceInformer.
	PipelineResources() PipelineResourceInformer
}

// version wires each informer in this group version to the shared factory,
// carrying the namespace scope and list-option tweaks along.
type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	v := &version{
		factory:          f,
		namespace:        namespace,
		tweakListOptions: tweakListOptions,
	}
	return v
}

// PipelineResources returns a PipelineResourceInformer.
func (v *version) PipelineResources() PipelineResourceInformer {
	return &pipelineResourceInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
time "time"
apisresourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
versioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned"
internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/internalinterfaces"
resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/listers/resource/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PipelineResourceInformer provides access to a shared informer and lister for
// PipelineResources.
type PipelineResourceInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() resourcev1alpha1.PipelineResourceLister
}

// pipelineResourceInformer lazily obtains the shared PipelineResource informer
// from the factory, scoped to a namespace and optional list-option tweaks.
type pipelineResourceInformer struct {
	factory internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace string
}
// NewPipelineResourceInformer constructs a new informer for PipelineResource type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPipelineResourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredPipelineResourceInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredPipelineResourceInformer constructs a new informer for PipelineResource type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPipelineResourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// Apply the optional tweak to each call's list options before issuing it.
	tweak := func(options *v1.ListOptions) {
		if tweakListOptions != nil {
			tweakListOptions(options)
		}
	}
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			tweak(&options)
			return client.TektonV1alpha1().PipelineResources(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			tweak(&options)
			return client.TektonV1alpha1().PipelineResources(namespace).Watch(context.TODO(), options)
		},
	}
	return cache.NewSharedIndexInformer(lw, &apisresourcev1alpha1.PipelineResource{}, resyncPeriod, indexers)
}
// defaultInformer builds the informer the shared factory caches for
// PipelineResources, indexed by namespace.
func (f *pipelineResourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
	return NewFilteredPipelineResourceInformer(client, f.namespace, resyncPeriod, indexers, f.tweakListOptions)
}

// Informer returns the shared PipelineResource informer, creating it on first use.
func (f *pipelineResourceInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&apisresourcev1alpha1.PipelineResource{}, f.defaultInformer)
}

// Lister returns a PipelineResource lister backed by the shared informer's indexer.
func (f *pipelineResourceInformer) Lister() resourcev1alpha1.PipelineResourceLister {
	return resourcev1alpha1.NewPipelineResourceLister(f.Informer().GetIndexer())
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package client
import (
context "context"
versioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned"
rest "k8s.io/client-go/rest"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Default.RegisterClient(withClientFromConfig)
	injection.Default.RegisterClientFetcher(func(ctx context.Context) interface{} {
		return Get(ctx)
	})
}

// Key is used as the key for associating information with a context.Context.
type Key struct{}

// withClientFromConfig stores a clientset built from the rest.Config on the
// context. NewForConfigOrDie panics if the config is unusable.
func withClientFromConfig(ctx context.Context, cfg *rest.Config) context.Context {
	cs := versioned.NewForConfigOrDie(cfg)
	return context.WithValue(ctx, Key{}, cs)
}

// Get extracts the versioned.Interface client from the context.
func Get(ctx context.Context) versioned.Interface {
	v := ctx.Value(Key{})
	if v == nil {
		// Distinguish "not the injected application context" from
		// "client was never registered on this context".
		if injection.GetConfig(ctx) == nil {
			logging.FromContext(ctx).Panic(
				"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned.Interface from context. This context is not the application context (which is typically given to constructors via sharedmain).")
		} else {
			logging.FromContext(ctx).Panic(
				"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned.Interface from context.")
		}
	}
	return v.(versioned.Interface)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake"
client "github.com/tektoncd/pipeline/pkg/client/resource/injection/client"
runtime "k8s.io/apimachinery/pkg/runtime"
rest "k8s.io/client-go/rest"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Fake.RegisterClient(withClient)
	injection.Fake.RegisterClientFetcher(func(ctx context.Context) interface{} {
		return Get(ctx)
	})
}

// withClient installs a fresh, empty fake clientset; the rest.Config is unused.
func withClient(ctx context.Context, cfg *rest.Config) context.Context {
	newCtx, _ := With(ctx)
	return newCtx
}

// With returns a context carrying a fake clientset pre-seeded with objects,
// along with the clientset itself for direct use in tests.
func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) {
	cs := fake.NewSimpleClientset(objects...)
	ctx = context.WithValue(ctx, client.Key{}, cs)
	return ctx, cs
}

// Get extracts the Kubernetes client from the context.
func Get(ctx context.Context) *fake.Clientset {
	v := ctx.Value(client.Key{})
	if v == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake.Clientset from context.")
	}
	return v.(*fake.Clientset)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package factory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions"
client "github.com/tektoncd/pipeline/pkg/client/resource/injection/client"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Default.RegisterInformerFactory(withInformerFactory)
}

// Key is used as the key for associating information with a context.Context.
type Key struct{}

// withInformerFactory stores a SharedInformerFactory on the context, built
// from the injected client, the configured resync period, and any namespace
// scope present on the context.
func withInformerFactory(ctx context.Context) context.Context {
	c := client.Get(ctx)
	var opts []externalversions.SharedInformerOption
	if injection.HasNamespaceScope(ctx) {
		opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
	}
	f := externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)
	return context.WithValue(ctx, Key{}, f)
}

// Get extracts the InformerFactory from the context.
func Get(ctx context.Context) externalversions.SharedInformerFactory {
	v := ctx.Value(Key{})
	if v == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions.SharedInformerFactory from context.")
	}
	return v.(externalversions.SharedInformerFactory)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions"
fake "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake"
factory "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
var Get = factory.Get

func init() {
	injection.Fake.RegisterInformerFactory(withInformerFactory)
}

// withInformerFactory mirrors the real factory injection, but builds the
// SharedInformerFactory from the fake client. It stores the factory under the
// real factory.Key so factory.Get finds it.
func withInformerFactory(ctx context.Context) context.Context {
	c := fake.Get(ctx)
	var opts []externalversions.SharedInformerOption
	if injection.HasNamespaceScope(ctx) {
		opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
	}
	f := externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)
	return context.WithValue(ctx, factory.Key{}, f)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fakeFilteredFactory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions"
fake "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake"
filtered "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/factory/filtered"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
var Get = filtered.Get

func init() {
	injection.Fake.RegisterInformerFactory(withInformerFactory)
}

// withInformerFactory registers one fake SharedInformerFactory per label
// selector found on the context, each stored under a selector-specific key.
func withInformerFactory(ctx context.Context) context.Context {
	c := fake.Get(ctx)
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	for _, selector := range untyped.([]string) {
		selector := selector // captured by the tweak closure below
		opts := make([]externalversions.SharedInformerOption, 0, 2)
		if injection.HasNamespaceScope(ctx) {
			opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
		}
		opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) {
			l.LabelSelector = selector
		}))
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector},
			externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
	}
	return ctx
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filteredFactory
import (
context "context"
externalversions "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions"
client "github.com/tektoncd/pipeline/pkg/client/resource/injection/client"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Default.RegisterInformerFactory(withInformerFactory)
}

// Key associates a filtered InformerFactory with its label selector.
type Key struct {
	Selector string
}

// LabelKey is the context key under which the list of label selectors is stored.
type LabelKey struct{}

// WithSelectors records on the context the label selectors for which filtered
// factories should be created.
func WithSelectors(ctx context.Context, selector ...string) context.Context {
	return context.WithValue(ctx, LabelKey{}, selector)
}

// withInformerFactory registers one SharedInformerFactory per label selector
// found on the context, each stored under a selector-specific Key.
func withInformerFactory(ctx context.Context) context.Context {
	c := client.Get(ctx)
	untyped := ctx.Value(LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	for _, selector := range untyped.([]string) {
		selector := selector // captured by the tweak closure below
		opts := make([]externalversions.SharedInformerOption, 0, 2)
		if injection.HasNamespaceScope(ctx) {
			opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
		}
		opts = append(opts, externalversions.WithTweakListOptions(func(l *v1.ListOptions) {
			l.LabelSelector = selector
		}))
		ctx = context.WithValue(ctx, Key{Selector: selector},
			externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
	}
	return ctx
}

// Get extracts the InformerFactory from the context.
func Get(ctx context.Context, selector string) externalversions.SharedInformerFactory {
	v := ctx.Value(Key{Selector: selector})
	if v == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions.SharedInformerFactory with selector %s from context.", selector)
	}
	return v.(externalversions.SharedInformerFactory)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
fake "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/factory/fake"
pipelineresource "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
var Get = pipelineresource.Get

func init() {
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer attaches the fake PipelineResource informer to the context and
// returns it so the injection framework can start it.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	inf := fake.Get(ctx).Tekton().V1alpha1().PipelineResources()
	return context.WithValue(ctx, pipelineresource.Key{}, inf), inf.Informer()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
factoryfiltered "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/factory/filtered"
filtered "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
var Get = filtered.Get

func init() {
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer attaches one fake filtered PipelineResource informer per label
// selector on the context and returns the informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	infs := []controller.Informer{}
	for _, selector := range untyped.([]string) {
		inf := factoryfiltered.Get(ctx, selector).Tekton().V1alpha1().PipelineResources()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1"
filtered "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
type Key struct {
	Selector string
}

// withInformer attaches one filtered PipelineResource informer per label
// selector on the context and returns the informers for startup.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	infs := []controller.Informer{}
	for _, selector := range untyped.([]string) {
		inf := filtered.Get(ctx, selector).Tekton().V1alpha1().PipelineResources()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1alpha1.PipelineResourceInformer {
	v := ctx.Value(Key{Selector: selector})
	if v == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1.PipelineResourceInformer with selector %s from context.", selector)
	}
	return v.(v1alpha1.PipelineResourceInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package pipelineresource
import (
context "context"
v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1"
factory "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/factory"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
// init registers withInformer with knative's default injection framework so
// the PipelineResource informer is available via Get at reconcile time.
func init() {
	injection.Default.RegisterInformer(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
// An empty struct suffices because exactly one such informer exists per context.
type Key struct{}

// withInformer obtains the shared informer factory from the context, builds
// the PipelineResource informer from it, and stores the typed informer back
// into the context under Key{}.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Tekton().V1alpha1().PipelineResources()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
// It panics (via the context logger) when the informer was never injected.
func Get(ctx context.Context) v1alpha1.PipelineResourceInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1.PipelineResourceInformer from context.")
	}
	return untyped.(v1alpha1.PipelineResourceInformer)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
)
// PipelineResourceLister helps list PipelineResources.
// All objects returned here must be treated as read-only.
type PipelineResourceLister interface {
	// List lists all PipelineResources in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*resourcev1alpha1.PipelineResource, err error)
	// PipelineResources returns an object that can list and get PipelineResources.
	PipelineResources(namespace string) PipelineResourceNamespaceLister
	// PipelineResourceListerExpansion allows custom methods to be added to the lister.
	PipelineResourceListerExpansion
}

// pipelineResourceLister implements the PipelineResourceLister interface.
type pipelineResourceLister struct {
	// The embedded generic indexer supplies the List implementation over the cache.
	listers.ResourceIndexer[*resourcev1alpha1.PipelineResource]
}

// NewPipelineResourceLister returns a new PipelineResourceLister.
// The indexer is the shared-informer cache that backs the lister.
func NewPipelineResourceLister(indexer cache.Indexer) PipelineResourceLister {
	return &pipelineResourceLister{listers.New[*resourcev1alpha1.PipelineResource](indexer, resourcev1alpha1.Resource("pipelineresource"))}
}

// PipelineResources returns an object that can list and get PipelineResources,
// scoped to the given namespace.
func (s *pipelineResourceLister) PipelineResources(namespace string) PipelineResourceNamespaceLister {
	return pipelineResourceNamespaceLister{listers.NewNamespaced[*resourcev1alpha1.PipelineResource](s.ResourceIndexer, namespace)}
}

// PipelineResourceNamespaceLister helps list and get PipelineResources.
// All objects returned here must be treated as read-only.
type PipelineResourceNamespaceLister interface {
	// List lists all PipelineResources in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*resourcev1alpha1.PipelineResource, err error)
	// Get retrieves the PipelineResource from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*resourcev1alpha1.PipelineResource, error)
	// PipelineResourceNamespaceListerExpansion allows custom methods to be added to the lister.
	PipelineResourceNamespaceListerExpansion
}

// pipelineResourceNamespaceLister implements the PipelineResourceNamespaceLister
// interface.
type pipelineResourceNamespaceLister struct {
	// The embedded generic namespaced indexer supplies List and Get.
	listers.ResourceIndexer[*resourcev1alpha1.PipelineResource]
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/substitution"
corev1 "k8s.io/api/core/v1"
)
// applyStepReplacements substitutes string and array variables throughout the
// container-level fields of the given Step, in place.
func applyStepReplacements(step *v1.Step, stringReplacements map[string]string, arrayReplacements map[string][]string) {
	container := step.ToK8sContainer()
	applyContainerReplacements(container, stringReplacements, arrayReplacements)
	step.SetContainerFields(*container)
}
// applySidecarReplacements substitutes string and array variables throughout
// the container-level fields of the given Sidecar, in place.
func applySidecarReplacements(sidecar *v1.Sidecar, stringReplacements map[string]string, arrayReplacements map[string][]string) {
	container := sidecar.ToK8sContainer()
	applyContainerReplacements(container, stringReplacements, arrayReplacements)
	sidecar.SetContainerFields(*container)
}
// applyContainerReplacements substitutes string and array variables in every
// user-settable field of a Kubernetes container, in place: name, image, image
// pull policy, args, env (values plus secret/configmap key refs), envFrom,
// working dir, command, and volume mounts.
func applyContainerReplacements(c *corev1.Container, stringReplacements map[string]string, arrayReplacements map[string][]string) {
	c.Name = substitution.ApplyReplacements(c.Name, stringReplacements)
	c.Image = substitution.ApplyReplacements(c.Image, stringReplacements)
	c.ImagePullPolicy = corev1.PullPolicy(substitution.ApplyReplacements(string(c.ImagePullPolicy), stringReplacements))

	// Args may grow: an array parameter reference expands to several elements,
	// so build a fresh slice with ApplyArrayReplacements.
	var expandedArgs []string
	for _, arg := range c.Args {
		expandedArgs = append(expandedArgs, substitution.ApplyArrayReplacements(arg, stringReplacements, arrayReplacements)...)
	}
	c.Args = expandedArgs

	for i := range c.Env {
		env := &c.Env[i]
		env.Value = substitution.ApplyReplacements(env.Value, stringReplacements)
		if env.ValueFrom == nil {
			continue
		}
		if ref := env.ValueFrom.SecretKeyRef; ref != nil {
			ref.LocalObjectReference.Name = substitution.ApplyReplacements(ref.LocalObjectReference.Name, stringReplacements)
			ref.Key = substitution.ApplyReplacements(ref.Key, stringReplacements)
		}
		if ref := env.ValueFrom.ConfigMapKeyRef; ref != nil {
			ref.LocalObjectReference.Name = substitution.ApplyReplacements(ref.LocalObjectReference.Name, stringReplacements)
			ref.Key = substitution.ApplyReplacements(ref.Key, stringReplacements)
		}
	}

	for i := range c.EnvFrom {
		src := &c.EnvFrom[i]
		src.Prefix = substitution.ApplyReplacements(src.Prefix, stringReplacements)
		if src.ConfigMapRef != nil {
			src.ConfigMapRef.LocalObjectReference.Name = substitution.ApplyReplacements(src.ConfigMapRef.LocalObjectReference.Name, stringReplacements)
		}
		if src.SecretRef != nil {
			src.SecretRef.LocalObjectReference.Name = substitution.ApplyReplacements(src.SecretRef.LocalObjectReference.Name, stringReplacements)
		}
	}

	c.WorkingDir = substitution.ApplyReplacements(c.WorkingDir, stringReplacements)

	// Command may also grow via array parameters.
	var expandedCommand []string
	for _, word := range c.Command {
		expandedCommand = append(expandedCommand, substitution.ApplyArrayReplacements(word, stringReplacements, arrayReplacements)...)
	}
	c.Command = expandedCommand

	for i := range c.VolumeMounts {
		vm := &c.VolumeMounts[i]
		vm.Name = substitution.ApplyReplacements(vm.Name, stringReplacements)
		vm.MountPath = substitution.ApplyReplacements(vm.MountPath, stringReplacements)
		vm.SubPath = substitution.ApplyReplacements(vm.SubPath, stringReplacements)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/substitution"
)
// ApplySidecarReplacements applies variable interpolation on a Sidecar:
// first the script, then every container-level field.
func ApplySidecarReplacements(sidecar *v1.Sidecar, stringReplacements map[string]string, arrayReplacements map[string][]string) {
	sidecar.Script = substitution.ApplyReplacements(sidecar.Script, stringReplacements)
	applySidecarReplacements(sidecar, stringReplacements, arrayReplacements)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/substitution"
)
// ApplyStepReplacements applies variable interpolation on a Step: the script,
// the on-error policy, the stdout/stderr redirection paths, the
// when-expressions, and finally every container-level field.
func ApplyStepReplacements(step *v1.Step, stringReplacements map[string]string, arrayReplacements map[string][]string) {
	step.Script = substitution.ApplyReplacements(step.Script, stringReplacements)
	step.OnError = v1.OnErrorType(substitution.ApplyReplacements(string(step.OnError), stringReplacements))
	if cfg := step.StdoutConfig; cfg != nil {
		cfg.Path = substitution.ApplyReplacements(cfg.Path, stringReplacements)
	}
	if cfg := step.StderrConfig; cfg != nil {
		cfg.Path = substitution.ApplyReplacements(cfg.Path, stringReplacements)
	}
	step.When = step.When.ReplaceVariables(stringReplacements, arrayReplacements)
	applyStepReplacements(step, stringReplacements, arrayReplacements)
}
// ApplyStepTemplateReplacements applies variable interpolation on a StepTemplate (aka a container).
func ApplyStepTemplateReplacements(stepTemplate *v1.StepTemplate, stringReplacements map[string]string, arrayReplacements map[string][]string) {
	c := stepTemplate.ToK8sContainer()
	applyContainerReplacements(c, stringReplacements, arrayReplacements)
	stepTemplate.SetContainerFields(*c)
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller provides helper methods for external controllers for
// Custom Task types.
package controller
import (
"errors"
"net/http"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// IsWebhookTimeout checks if the error is due to a mutating admission webhook timeout.
// This function is used to determine if an error should trigger exponential backoff retry logic.
func IsWebhookTimeout(err error) bool {
	var statusErr *apierrors.StatusError
	if !errors.As(err, &statusErr) {
		return false
	}
	// Webhook timeouts surface as a 500 whose message mentions "timeout".
	status := statusErr.ErrStatus
	return status.Code == http.StatusInternalServerError && strings.Contains(status.Message, "timeout")
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller provides helper methods for external controllers for
// Custom Task types.
package controller
import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
listersbeta "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FilterCustomRunRef returns a filter that can be passed to a CustomRun Informer, which
// filters out CustomRuns for apiVersion and kinds that a controller doesn't care
// about.
//
// For example, a controller impl that wants to be notified of updates to CustomRuns
// which reference a Task with apiVersion "example.dev/v0" and kind "Example":
//
//	customruninformer.Get(ctx).Informer().AddEventHandler(cache.FilteringResourceEventHandler{
//		FilterFunc: FilterCustomRunRef("example.dev/v0", "Example"),
//		Handler:    controller.HandleAll(impl.Enqueue),
//	})
func FilterCustomRunRef(apiVersion, kind string) func(interface{}) bool {
	return func(obj interface{}) bool {
		r, ok := obj.(*v1beta1.CustomRun)
		if !ok {
			// Somehow got informed of a non-CustomRun object.
			// Ignore.
			return false
		}
		if r == nil || (r.Spec.CustomRef == nil && r.Spec.CustomSpec == nil) {
			// These are invalid, but just in case they get
			// created somehow, don't panic.
			return false
		}
		if r.Spec.CustomRef != nil && r.Spec.CustomSpec != nil {
			// Having both a reference and an inline spec is invalid; skip such
			// CustomRuns, mirroring the check in FilterOwnerCustomRunRef.
			return false
		}
		if r.Spec.CustomRef != nil {
			return r.Spec.CustomRef.APIVersion == apiVersion && r.Spec.CustomRef.Kind == v1beta1.TaskKind(kind)
		}
		return r.Spec.CustomSpec.APIVersion == apiVersion && r.Spec.CustomSpec.Kind == kind
	}
}
// FilterOwnerCustomRunRef returns a filter that can be passed to an Informer for any runtime object, which
// filters out objects that aren't controlled by a CustomRun that references a particular apiVersion and kind.
//
// For example, a controller impl that wants to be notified of updates to TaskRuns that are controlled by
// a CustomRun which references a custom task with apiVersion "example.dev/v0" and kind "Example":
//
//	taskruninformer.Get(ctx).Informer().AddEventHandler(cache.FilteringResourceEventHandler{
//		FilterFunc: FilterOwnerCustomRunRef("example.dev/v0", "Example"),
//		Handler:    controller.HandleAll(impl.Enqueue),
//	})
func FilterOwnerCustomRunRef(customRunLister listersbeta.CustomRunLister, apiVersion, kind string) func(interface{}) bool {
	return func(obj interface{}) bool {
		mo, ok := obj.(metav1.Object)
		if !ok {
			return false
		}
		ownerRef := metav1.GetControllerOf(mo)
		if ownerRef == nil {
			return false
		}
		if ownerRef.APIVersion != v1beta1.SchemeGroupVersion.String() || ownerRef.Kind != pipeline.CustomRunControllerName {
			// Not owned by a CustomRun.
			return false
		}
		owner, err := customRunLister.CustomRuns(mo.GetNamespace()).Get(ownerRef.Name)
		if err != nil {
			return false
		}
		hasRef, hasSpec := owner.Spec.CustomRef != nil, owner.Spec.CustomSpec != nil
		switch {
		case hasRef == hasSpec:
			// Neither or both set: the CustomRun is invalid, but just in case
			// it got created somehow, don't panic — just skip it.
			return false
		case hasRef:
			return owner.Spec.CustomRef.APIVersion == apiVersion && owner.Spec.CustomRef.Kind == v1beta1.TaskKind(kind)
		default:
			return owner.Spec.CustomSpec.APIVersion == apiVersion && owner.Spec.CustomSpec.Kind == kind
		}
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockercreds
import (
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/tektoncd/pipeline/pkg/credentials/common"
credmatcher "github.com/tektoncd/pipeline/pkg/credentials/matcher"
credwriter "github.com/tektoncd/pipeline/pkg/credentials/writer"
)
// annotationPrefix marks secret annotations (tekton.dev/docker-*) that declare
// which registry URL a docker credential secret applies to.
const annotationPrefix = "tekton.dev/docker-"

// Package-level flag state, (re)initialized by flags() and populated as the
// flag set is parsed.
var (
	config       basicDocker // entries from repeated -basic-docker=secret=url flags
	dockerConfig arrayArg    // secret names from repeated -docker-config flags
	dockerCfg    arrayArg    // secret names from repeated -docker-cfg flags
)
// AddFlags registers every CLI flag that dockercreds supports on the given
// flag.FlagSet.
func AddFlags(flagSet *flag.FlagSet) {
	flags(flagSet)
}
// flags resets the package-level flag state and binds it to fs.
func flags(fs *flag.FlagSet) {
	config = basicDocker{Entries: map[string]entry{}}
	dockerConfig = arrayArg{Values: []string{}}
	dockerCfg = arrayArg{Values: []string{}}
	fs.Var(&config, "basic-docker", "List of secret=url pairs.")
	fs.Var(&dockerConfig, "docker-config", "Docker config.json secret file.")
	fs.Var(&dockerCfg, "docker-cfg", "Docker .dockercfg secret file.")
}
// As the flag is read, this status is populated.
// basicDocker implements flag.Value
type basicDocker struct {
	// Entries maps a registry URL to its credential entry; the json tag
	// matches the "auths" section of a docker config.json file.
	Entries map[string]entry `json:"auths"`
}
// String renders the accumulated flag value as comma-joined "secret=url"
// pairs. Pair order is unspecified because Entries is a map.
func (dc *basicDocker) String() string {
	if dc == nil {
		// According to flag.Value this can happen.
		return ""
	}
	pairs := make([]string, 0, len(dc.Entries))
	for url, e := range dc.Entries {
		pairs = append(pairs, e.Secret+"="+url)
	}
	return strings.Join(pairs, ",")
}
// Set records a "secret=url" flag value, loading the secret's username and
// password from its volume mount. Duplicate URLs are rejected.
func (dc *basicDocker) Set(value string) error {
	parts := strings.Split(value, "=")
	if len(parts) != 2 {
		return fmt.Errorf("expect entries of the form secret=url, got: %v", value)
	}
	secret, url := parts[0], parts[1]
	if _, dup := dc.Entries[url]; dup {
		return fmt.Errorf("multiple entries for url: %v", url)
	}
	e, err := newEntry(secret)
	if err != nil {
		return err
	}
	dc.Entries[url] = *e
	return nil
}
// arrayArg is a repeatable string flag; each occurrence appends to Values.
// It implements flag.Value.
type arrayArg struct {
	Values []string
}

// Set appends one more occurrence of the flag.
func (aa *arrayArg) Set(value string) error {
	aa.Values = append(aa.Values, value)
	return nil
}

// String renders every collected value, comma-joined.
func (aa *arrayArg) String() string {
	return strings.Join(aa.Values, ",")
}
// configFile mirrors the top-level structure of a docker config.json file,
// whose "auths" key maps registry URLs to credentials.
type configFile struct {
	Auth map[string]entry `json:"auths"`
}

// entry is one registry credential as serialized into docker's config.json.
type entry struct {
	Secret   string `json:"-"` // originating secret name; not serialized
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth"` // base64 of "username:password" (see newEntry)
	Email    string `json:"email,omitempty"`
}
// newEntry loads the username and password files from the named secret's
// volume mount and assembles a docker config entry for it.
func newEntry(secret string) (*entry, error) {
	secretPath := credmatcher.VolumeName(secret)
	userBytes, err := os.ReadFile(filepath.Join(secretPath, common.BasicAuthUsernameKey))
	if err != nil {
		return nil, err
	}
	passBytes, err := os.ReadFile(filepath.Join(secretPath, common.BasicAuthPasswordKey))
	if err != nil {
		return nil, err
	}
	username, password := string(userBytes), string(passBytes)
	return &entry{
		Secret:   secret,
		Username: username,
		Password: password,
		Auth:     base64.StdEncoding.EncodeToString([]byte(username + ":" + password)),
		Email:    "not@val.id",
	}, nil
}
// basicDockerBuilder matches docker-credential secrets and writes them out as
// a docker config.json; it carries no state of its own.
type basicDockerBuilder struct{}

// NewBuilder returns a new builder for Docker credentials.
func NewBuilder() interface {
	credmatcher.Matcher
	credwriter.Writer
} {
	return &basicDockerBuilder{}
}
// MatchingAnnotations extracts flags for the credential helper
// from the supplied secret and returns a slice (of length 0 or
// greater) of applicable domains.
func (*basicDockerBuilder) MatchingAnnotations(secret credmatcher.Secret) []string {
	// Named args (not "flags") to avoid shadowing the package-level flags func.
	var args []string
	switch credmatcher.GetSecretType(secret) {
	case common.SecretTypeBasicAuth:
		for _, v := range credwriter.SortAnnotations(secret.GetAnnotations(), annotationPrefix) {
			args = append(args, fmt.Sprintf("-basic-docker=%s=%s", secret.GetName(), v))
		}
	case common.SecretTypeDockerConfigJson:
		args = append(args, "-docker-config="+secret.GetName())
	case common.SecretTypeDockercfg:
		args = append(args, "-docker-cfg="+secret.GetName())
	case common.SecretTypeOpaque, common.SecretTypeServiceAccountToken, common.SecretTypeSSHAuth, common.SecretTypeTLS, common.SecretTypeBootstrapToken:
		fallthrough
	default:
		// Not a secret type docker credentials understand.
		return args
	}
	return args
}
// Write builds a .docker/config.json file from a combination
// of kubernetes docker registry secrets and tekton docker
// secret entries and writes it to the given directory. If
// no entries exist then nothing will be written to disk.
func (*basicDockerBuilder) Write(directory string) error {
	dockerDir := filepath.Join(directory, ".docker")
	// configPath was previously named "basicDocker", which shadowed the
	// basicDocker type.
	configPath := filepath.Join(dockerDir, "config.json")

	// Merge all sources into one auth map. Later sources win on key collision:
	// .dockercfg secrets, then config.json secrets, then -basic-docker entries.
	auth := map[string]entry{}
	for _, secretName := range dockerCfg.Values {
		dockerConfigAuthMap, err := authsFromDockerCfg(secretName)
		if err != nil {
			return err
		}
		for k, v := range dockerConfigAuthMap {
			auth[k] = v
		}
	}
	for _, secretName := range dockerConfig.Values {
		dockerConfigAuthMap, err := authsFromDockerConfig(secretName)
		if err != nil {
			return err
		}
		for k, v := range dockerConfigAuthMap {
			auth[k] = v
		}
	}
	for k, v := range config.Entries {
		auth[k] = v
	}
	if len(auth) == 0 {
		return nil
	}
	if err := os.MkdirAll(dockerDir, os.ModePerm); err != nil {
		return err
	}
	// Build the configFile once from the merged map. (The old code seeded it
	// with config.Entries up front, but that value was dead: it was always
	// overwritten before marshalling.)
	content, err := json.Marshal(configFile{Auth: auth})
	if err != nil {
		return err
	}
	return os.WriteFile(configPath, content, 0o600)
}
// authsFromDockerCfg reads a kubernetes .dockercfg secret, which is itself a
// bare map of registry URL to credential entry.
func authsFromDockerCfg(secret string) (map[string]entry, error) {
	auths := make(map[string]entry)
	data, err := os.ReadFile(filepath.Join(credmatcher.VolumeName(secret), common.DockerConfigKey))
	if err != nil {
		return auths, err
	}
	return auths, json.Unmarshal(data, &auths)
}
// authsFromDockerConfig reads a kubernetes config.json secret and returns the
// credential entries nested under its "auths" key.
func authsFromDockerConfig(secret string) (map[string]entry, error) {
	auths := make(map[string]entry)
	data, err := os.ReadFile(filepath.Join(credmatcher.VolumeName(secret), common.DockerConfigJsonKey))
	if err != nil {
		return auths, err
	}
	var cf configFile
	if err := json.Unmarshal(data, &cf); err != nil {
		return auths, err
	}
	for url, e := range cf.Auth {
		auths[url] = e
	}
	return auths, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gitcreds
import (
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/tektoncd/pipeline/pkg/credentials/common"
credmatcher "github.com/tektoncd/pipeline/pkg/credentials/matcher"
)
// As the flag is read, this status is populated.
// basicGitConfig implements flag.Value
type basicGitConfig struct {
	// entries maps a url to the basic-auth credential registered for it.
	entries map[string]basicEntry
	// The order we see things, for iterating over the above.
	order []string
}
// String renders the accumulated flag value as comma-joined "secret=url"
// pairs, in the order the flags were seen.
func (dc *basicGitConfig) String() string {
	if dc == nil {
		// According to flag.Value this can happen.
		return ""
	}
	pairs := make([]string, 0, len(dc.order))
	for _, url := range dc.order {
		pairs = append(pairs, dc.entries[url].secret+"="+url)
	}
	return strings.Join(pairs, ",")
}
// Set records a "secret=url" flag value, loading the secret's username and
// password from its volume mount. Duplicate URLs are rejected; first-seen
// order is remembered for deterministic output.
func (dc *basicGitConfig) Set(value string) error {
	parts := strings.Split(value, "=")
	if len(parts) != 2 {
		return fmt.Errorf("expect entries of the form secret=url, got: %v", value)
	}
	secret, url := parts[0], parts[1]
	if _, dup := dc.entries[url]; dup {
		return fmt.Errorf("multiple entries for url: %v", url)
	}
	e, err := newBasicEntry(url, secret)
	if err != nil {
		return err
	}
	dc.entries[url] = *e
	dc.order = append(dc.order, url)
	return nil
}
// Write materializes the collected credentials under directory as a
// .gitconfig (credential-helper configuration) plus a .git-credentials store
// file. Nothing is written when no entries were collected.
func (dc *basicGitConfig) Write(directory string) error {
	if len(dc.entries) == 0 {
		return nil
	}

	// .gitconfig: enable the "store" credential helper, then one
	// [credential "url"] section per entry, in flag order.
	var gitConfig strings.Builder
	gitConfig.WriteString("[credential]\n helper = store\n")
	for _, url := range dc.order {
		e := dc.entries[url]
		gitConfig.WriteString(e.configBlurb(url))
	}
	if err := os.WriteFile(filepath.Join(directory, ".gitconfig"), []byte(gitConfig.String()), 0o600); err != nil {
		return err
	}

	// .git-credentials: one user:pass-bearing URL per line, trailing newline.
	var creds strings.Builder
	for _, url := range dc.order {
		creds.WriteString(dc.entries[url].authURL.String())
		creds.WriteString("\n")
	}
	return os.WriteFile(filepath.Join(directory, ".git-credentials"), []byte(creds.String()), 0o600)
}
// basicEntry is one basic-auth git credential loaded from a secret volume.
type basicEntry struct {
	secret   string
	username string
	password string
	// Has the form: https://user:pass@url.com
	authURL *url.URL
}

// configBlurb renders this entry's [credential "url"] section for .gitconfig.
func (be *basicEntry) configBlurb(u string) string {
	return fmt.Sprintf("[credential %q]\n username = %s\n", u, be.escapedUsername())
}

// escapedUsername doubles backslashes in the username (e.g. DOMAIN\user) so
// git parses it literally. strings.ReplaceAll already returns its input
// unchanged when there is nothing to replace, so no Contains pre-check is
// needed.
func (be *basicEntry) escapedUsername() string {
	return strings.ReplaceAll(be.username, "\\", "\\\\")
}
// newBasicEntry reads the username and password files from the named secret's
// volume mount and builds a basicEntry whose authURL embeds those credentials
// into the given URL.
func newBasicEntry(u, secret string) (*basicEntry, error) {
	secretPath := credmatcher.VolumeName(secret)
	userBytes, err := os.ReadFile(filepath.Join(secretPath, common.BasicAuthUsernameKey))
	if err != nil {
		return nil, err
	}
	passBytes, err := os.ReadFile(filepath.Join(secretPath, common.BasicAuthPasswordKey))
	if err != nil {
		return nil, err
	}
	parsed, err := url.Parse(u)
	if err != nil {
		return nil, err
	}
	username, password := string(userBytes), string(passBytes)
	parsed.User = url.UserPassword(username, password)
	return &basicEntry{
		secret:   secret,
		username: username,
		password: password,
		authURL:  parsed,
	}, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gitcreds
import (
"flag"
"fmt"
"github.com/tektoncd/pipeline/pkg/credentials/common"
credmatcher "github.com/tektoncd/pipeline/pkg/credentials/matcher"
credwriter "github.com/tektoncd/pipeline/pkg/credentials/writer"
)
const (
	// annotationPrefix marks secret annotations (tekton.dev/git-*) that
	// declare which URL a git credential secret applies to.
	annotationPrefix = "tekton.dev/git-"
	// basicAuthFlag and sshFlag are the CLI flag names bound in flags().
	basicAuthFlag = "basic-git"
	sshFlag       = "ssh-git"
)

// Package-level flag state, (re)initialized by flags() and populated as the
// flag set is parsed.
var (
	basicConfig basicGitConfig
	sshConfig   sshGitConfig
)
// AddFlags registers every CLI flag that gitcreds supports on the given
// flag.FlagSet.
func AddFlags(flagSet *flag.FlagSet) {
	flags(flagSet)
}
// flags resets the package-level git credential state and binds both flag
// values to fs.
func flags(fs *flag.FlagSet) {
	basicConfig = basicGitConfig{entries: map[string]basicEntry{}, order: []string{}}
	sshConfig = sshGitConfig{entries: map[string][]sshEntry{}, order: []string{}}
	fs.Var(&basicConfig, basicAuthFlag, "List of secret=url pairs.")
	fs.Var(&sshConfig, sshFlag, "List of secret=url pairs.")
}
// gitBuilder matches git-credential secrets and writes them out as git
// configuration files; it carries no state of its own.
type gitBuilder struct{}

// NewBuilder returns a new builder for Git credentials.
func NewBuilder() interface {
	credmatcher.Matcher
	credwriter.Writer
} {
	return &gitBuilder{}
}
// MatchingAnnotations extracts flags for the credential helper
// from the supplied secret and returns a slice (of length 0 or
// greater) of applicable domains.
func (*gitBuilder) MatchingAnnotations(secret credmatcher.Secret) []string {
	var args []string
	var flagName string
	switch credmatcher.GetSecretType(secret) {
	case common.SecretTypeBasicAuth:
		flagName = basicAuthFlag
	case common.SecretTypeSSHAuth:
		flagName = sshFlag
	case common.SecretTypeOpaque, common.SecretTypeServiceAccountToken, common.SecretTypeDockercfg, common.SecretTypeDockerConfigJson, common.SecretTypeTLS, common.SecretTypeBootstrapToken:
		fallthrough
	default:
		// Not a secret type git credentials understand.
		return args
	}
	for _, v := range credwriter.SortAnnotations(secret.GetAnnotations(), annotationPrefix) {
		args = append(args, fmt.Sprintf("-%s=%s=%s", flagName, secret.GetName(), v))
	}
	return args
}
// Write writes both the basic-auth and ssh git credentials into the provided
// directory, failing on the first error.
func (*gitBuilder) Write(directory string) error {
	if err := basicConfig.Write(directory); err != nil {
		return err
	}
	return sshConfig.Write(directory)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gitcreds
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"github.com/tektoncd/pipeline/pkg/credentials/common"
credmatcher "github.com/tektoncd/pipeline/pkg/credentials/matcher"
)
// sshKnownHosts is the file name carrying SSH known-hosts data, both inside a
// secret volume (read in newSSHEntry) and under the generated .ssh directory.
const sshKnownHosts = "known_hosts"

// As the flag is read, this status is populated.
// sshGitConfig implements flag.Value
type sshGitConfig struct {
	// entries maps a url (host or host:port) to the ssh credentials
	// registered for it; several secrets may target the same url.
	entries map[string][]sshEntry
	// The order we see things, for iterating over the above.
	order []string
}
// String renders the accumulated flag value as comma-joined "secret=url"
// pairs, in the order the flags were seen.
func (dc *sshGitConfig) String() string {
	if dc == nil {
		// According to flag.Value this can happen.
		return ""
	}
	var pairs []string
	for _, url := range dc.order {
		for _, e := range dc.entries[url] {
			pairs = append(pairs, e.secretName+"="+url)
		}
	}
	return strings.Join(pairs, ",")
}
// Set records a "secret=url" flag value, loading the SSH private key (and any
// known_hosts data) from the secret's volume mount. Multiple secrets may
// target the same url; first-seen order is remembered.
func (dc *sshGitConfig) Set(value string) error {
	parts := strings.Split(value, "=")
	if len(parts) != 2 {
		return fmt.Errorf("expect entries of the form secret=url, got: %v", value)
	}
	secretName, url := parts[0], parts[1]
	e, err := newSSHEntry(url, secretName)
	if err != nil {
		return err
	}
	if _, seen := dc.entries[url]; !seen {
		dc.order = append(dc.order, url)
	}
	dc.entries[url] = append(dc.entries[url], *e)
	return nil
}
// Write puts dc's ssh entries into files in a .ssh directory, under
// the given directory. If dc has no entries then nothing is written.
//
// For each url it writes the private key(s) to ~/.ssh/id_{secretName}, adds a
// matching Host section to ~/.ssh/config, and collects any known-hosts data
// into ~/.ssh/known_hosts.
func (dc *sshGitConfig) Write(directory string) error {
	if len(dc.entries) == 0 {
		return nil
	}
	sshDir := filepath.Join(directory, ".ssh")
	if err := os.MkdirAll(sshDir, os.ModePerm); err != nil {
		return err
	}
	const defaultPort = "22"
	var configEntries []string
	var knownHosts []string
	for _, k := range dc.order {
		// The url may be "host" or "host:port"; default the port when absent.
		host, port, err := net.SplitHostPort(k)
		if err != nil {
			host = k
			port = defaultPort
		}
		configEntry := fmt.Sprintf(`Host %s
    HostName %s
    Port %s
`, host, host, port)
		for _, e := range dc.entries[k] {
			if err := e.Write(sshDir); err != nil {
				return err
			}
			configEntry += fmt.Sprintf(`    IdentityFile %s
`, e.path(sshDir))
			if e.knownHosts != "" {
				knownHosts = append(knownHosts, e.knownHosts)
			}
		}
		configEntries = append(configEntries, configEntry)
	}
	configPath := filepath.Join(sshDir, "config")
	configContent := strings.Join(configEntries, "")
	if err := os.WriteFile(configPath, []byte(configContent), 0o600); err != nil {
		return err
	}
	if len(knownHosts) > 0 {
		// Use the shared sshKnownHosts constant instead of repeating the
		// "known_hosts" literal (newSSHEntry already uses the constant).
		knownHostsPath := filepath.Join(sshDir, sshKnownHosts)
		knownHostsContent := strings.Join(knownHosts, "\n")
		return os.WriteFile(knownHostsPath, []byte(knownHostsContent), 0o600)
	}
	return nil
}
// sshEntry holds the material read from a single SSH credential secret.
type sshEntry struct {
	secretName string // name of the secret this entry came from
	privateKey string // contents of the private-key file
	knownHosts string // optional known_hosts contents; may be empty
}

// path returns the location of this entry's private-key file within sshDir.
func (e *sshEntry) path(sshDir string) string {
	return filepath.Join(sshDir, "id_"+e.secretName)
}

// Write writes the private key to its well-known path under sshDir,
// readable and writable only by the owner.
func (e *sshEntry) Write(sshDir string) error {
	return os.WriteFile(e.path(sshDir), []byte(e.privateKey), 0600)
}
// newSSHEntry builds an sshEntry for url by reading the secret's mounted
// volume: the private key is required, known_hosts is optional.
func newSSHEntry(url, secretName string) (*sshEntry, error) {
	secretPath := credmatcher.VolumeName(secretName)

	pk, err := os.ReadFile(filepath.Join(secretPath, common.SSHAuthPrivateKey))
	if err != nil {
		return nil, err
	}

	entry := &sshEntry{
		secretName: secretName,
		privateKey: string(pk),
	}
	// known_hosts is best-effort: a read failure simply leaves it empty.
	if kh, err := os.ReadFile(filepath.Join(secretPath, sshKnownHosts)); err == nil {
		entry.knownHosts = string(kh)
	}
	return entry, nil
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package matcher
import (
"fmt"
"reflect"
)
// VolumePath is the path where build secrets are written.
// It is mutable and exported for testing.
// Each secret is mounted in a subdirectory named after the secret; see
// VolumeName.
var VolumePath = "/tekton/creds-secrets"
// Secret is the minimal interface needed for credential matching.
type Secret interface {
	// GetName returns the secret's name.
	GetName() string
	// GetAnnotations returns the secret's annotation map.
	GetAnnotations() map[string]string
}
// Matcher is the interface for a credential initializer of any type.
type Matcher interface {
	// MatchingAnnotations extracts flags for the credential
	// helper from the supplied secret and returns a slice (of length 0 or
	// greater) of command-line flag values.
	MatchingAnnotations(secret Secret) []string
}
// VolumeName returns the full path to the secret, inside the VolumePath.
func VolumeName(secretName string) string {
return fmt.Sprintf("%s/%s", VolumePath, secretName)
}
// GetSecretType returns the secret's type via reflection, or "" when the
// value is nil, a nil pointer, or has no accessible "Type" field.
func GetSecretType(secret Secret) string {
	if secret == nil {
		return ""
	}
	v := reflect.ValueOf(secret)
	// Dereference a pointer, guarding against a typed-nil inside the interface.
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return ""
		}
		v = v.Elem()
	}
	// Kubernetes secrets carry their type in an exported "Type" field.
	field := v.FieldByName("Type")
	if !field.IsValid() || !field.CanInterface() {
		return ""
	}
	return fmt.Sprintf("%v", field.Interface())
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package writer
import (
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
)
const (
	// credsDirPermissions are the permission bits assigned to the directories
	// copied out of the /tekton/creds and into a Step's HOME.
	credsDirPermissions = 0o700

	// credsFilePermissions are the permission bits assigned to the files
	// copied out of /tekton/creds and into a Step's HOME.
	credsFilePermissions = 0o600
)
// CredsInitCredentials is the complete list of credentials that the legacy credentials
// helper (aka "creds-init") can write to /tekton/creds.
// Each entry is a path relative to both /tekton/creds and the Step's HOME.
var CredsInitCredentials = []string{".docker", ".gitconfig", ".git-credentials", ".ssh"}
// Writer is the interface for a credential initializer of any type.
type Writer interface {
	// Write writes the credentials to the provided directory, returning
	// any error encountered.
	Write(folder string) error
}
// SortAnnotations returns the values of every entry in secrets whose key
// starts with annotationPrefix, sorted lexicographically. The map's keys
// themselves are only used for prefix matching.
func SortAnnotations(secrets map[string]string, annotationPrefix string) []string {
	var matched []string
	for key, value := range secrets {
		if strings.HasPrefix(key, annotationPrefix) {
			matched = append(matched, value)
		}
	}
	// Map iteration order is random; sorting makes the output deterministic.
	sort.Strings(matched)
	return matched
}
// CopyCredsToHome copies credentials from the /tekton/creds directory into
// the current Step's HOME directory. A list of credential paths to try and
// copy is given as an arg, for example, []string{".docker", ".ssh"}. A missing
// /tekton/creds directory is not considered an error.
func CopyCredsToHome(credPaths []string) error {
	info, statErr := os.Stat(pipeline.CredsDir)
	if statErr != nil || !info.IsDir() {
		// Nothing to copy; this is normal when no credentials were attached.
		return nil //nolint:nilerr // safe to ignore error; no credentials available to copy
	}
	homepath, err := os.UserHomeDir()
	if err != nil {
		return fmt.Errorf("error getting the user's home directory: %w", err)
	}
	for _, cred := range credPaths {
		src := filepath.Join(pipeline.CredsDir, cred)
		dst := filepath.Join(homepath, cred)
		// Copy failures are logged but deliberately non-fatal: a Step should
		// not fail outright because one credential could not be staged.
		if copyErr := tryCopyCred(src, dst); copyErr != nil {
			log.Printf("warning: unsuccessful cred copy: %q from %q to %q: %v", cred, pipeline.CredsDir, homepath, copyErr)
		}
	}
	return nil
}
// tryCopyCred will recursively copy a given source path to a given
// destination path. A missing source file is treated as normal behaviour
// and no error is returned.
//
// Directories are created with 0700 and files with 0600 permissions.
// NOTE(review): os.Lstat is used, so a symlink does not report as a
// directory, but os.Open follows the link — the link target's content is
// copied as a regular file. Confirm this is the intended symlink handling.
func tryCopyCred(source, destination string) error {
	fromInfo, err := os.Lstat(source)
	if err != nil {
		if os.IsNotExist(err) {
			// Missing credential: nothing to copy, not an error.
			return nil
		}
		return fmt.Errorf("unable to read source file info: %w", err)
	}
	fromFile, err := os.Open(filepath.Clean(source))
	if err != nil {
		if os.IsNotExist(err) {
			// Vanished between Lstat and Open; treat as missing.
			return nil
		}
		return fmt.Errorf("unable to open source: %w", err)
	}
	defer fromFile.Close()
	if fromInfo.IsDir() {
		err := os.MkdirAll(destination, credsDirPermissions)
		if err != nil {
			return fmt.Errorf("unable to create destination directory: %w", err)
		}
		subdirs, err := fromFile.Readdirnames(0)
		if err != nil {
			return fmt.Errorf("unable to read subdirectories of source: %w", err)
		}
		// Recurse into every entry; each recursive call opens and closes
		// its own handles, so the defers above stay bounded.
		for _, subdir := range subdirs {
			src := filepath.Join(source, subdir)
			dst := filepath.Join(destination, subdir)
			if err := tryCopyCred(src, dst); err != nil {
				return err
			}
		}
	} else {
		// Regular file: create or truncate the destination, then stream.
		flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC
		toFile, err := os.OpenFile(destination, flags, credsFilePermissions)
		if err != nil {
			return fmt.Errorf("unable to open destination: %w", err)
		}
		defer toFile.Close()
		_, err = io.Copy(toFile, fromFile)
		if err != nil {
			return fmt.Errorf("error copying from source to destination: %w", err)
		}
	}
	return nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package entrypoint
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/google/cel-go/cel"
"github.com/tektoncd/pipeline/internal/artifactref"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/types"
"github.com/tektoncd/pipeline/pkg/entrypoint/pipeline"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
"github.com/tektoncd/pipeline/pkg/result"
"github.com/tektoncd/pipeline/pkg/termination"
)
// RFC3339 with millisecond
const (
	// timeFormat is the timestamp layout for StartedAt values: RFC3339
	// extended with millisecond precision.
	timeFormat = "2006-01-02T15:04:05.000Z07:00"
	// ContinueOnError is the OnError value telling the entrypoint to keep
	// executing subsequent steps even when the container exits non-zero.
	ContinueOnError = "continue"
	// FailOnError is the OnError value telling the entrypoint to fail the
	// taskRun when the container exits non-zero.
	FailOnError = "stopAndFail"
)
const (
	// breakpointExitSuffix is appended to the post file name to form the
	// on-failure breakpoint exit file.
	breakpointExitSuffix = ".breakpointexit"
	// breakpointBeforeStepSuffix is appended to the post file name to form
	// the before-step breakpoint exit file.
	breakpointBeforeStepSuffix = ".beforestepexit"
	// ResultExtractionMethodTerminationMessage selects result extraction
	// via the container termination message.
	ResultExtractionMethodTerminationMessage = "termination-message"
	// Termination reasons recorded as the "Reason" run result.
	TerminationReasonSkipped         = "Skipped"
	TerminationReasonCancelled       = "Cancelled"
	TerminationReasonTimeoutExceeded = "TimeoutExceeded"
	// DownwardMountCancelFile is cancellation file mount to step, entrypoint will check this file to cancel the step.
	downwardMountPoint      = "/tekton/downward"
	downwardMountCancelFile = "cancel"
	// stepPrefix is prepended to a step name to form its container name.
	stepPrefix = "step-"
)
const (
	// CredsDir is the directory where credentials are placed to meet the legacy credentials
	// helpers image (aka "creds-init") contract.
	// The "#nosec" marker suppresses the hardcoded-credentials lint for
	// this fixed in-pod path.
	CredsDir = "/tekton/creds" // #nosec
)
// DownwardMountCancelFile is the absolute path of the downward-API
// cancellation file; the entrypoint watches it to cancel the step.
var DownwardMountCancelFile string

func init() {
	// Computed at startup because filepath.Join cannot appear in a const.
	DownwardMountCancelFile = filepath.Join(downwardMountPoint, downwardMountCancelFile)
}
// DebugBeforeStepError is an error meaning the before-step breakpoint
// failed: the user decided not to run the step.
type DebugBeforeStepError string

// Error implements the error interface.
func (d DebugBeforeStepError) Error() string {
	return string(d)
}
var (
	// errDebugBeforeStep is returned when the user, at the before-step
	// breakpoint, decides to skip the current step's execution.
	errDebugBeforeStep = DebugBeforeStepError("before step breakpoint error file, user decided to skip the current step execution")
)
// ScriptDir is the directory holding step scripts; a variable (rather than
// a constant) so tests can override it.
var ScriptDir = pipeline.ScriptDir
// ContextError is a string-backed error type used to mirror context errors
// so they can be matched with errors.Is.
type ContextError string

// Error implements the error interface.
func (c ContextError) Error() string {
	return string(c)
}

// SkipError is an error indicating a step was skipped rather than executed.
type SkipError string

// Error implements the error interface.
func (s SkipError) Error() string {
	return string(s)
}

var (
	// ErrContextDeadlineExceeded is the error returned when the context deadline is exceeded
	ErrContextDeadlineExceeded = ContextError(context.DeadlineExceeded.Error())
	// ErrContextCanceled is the error returned when the context is canceled
	ErrContextCanceled = ContextError(context.Canceled.Error())
	// ErrSkipPreviousStepFailed is the error returned when the step is skipped due to previous step error
	ErrSkipPreviousStepFailed = SkipError("error file present, bail and skip the step")
)

// IsContextDeadlineError reports whether err is the deadline-exceeded marker.
func IsContextDeadlineError(err error) bool {
	return errors.Is(err, ErrContextDeadlineExceeded)
}

// IsContextCanceledError reports whether err is the canceled marker.
func IsContextCanceledError(err error) bool {
	return errors.Is(err, ErrContextCanceled)
}
// Entrypointer holds fields for running commands with redirected
// entrypoints.
type Entrypointer struct {
	// Command is the original specified command and args.
	Command []string
	// WaitFiles is the set of files to wait for. If empty, execution
	// begins immediately.
	WaitFiles []string
	// WaitFileContent indicates the WaitFile should have non-zero size
	// before continuing with execution.
	WaitFileContent bool
	// PostFile is the file to write when complete. If not specified, no
	// file is written.
	PostFile string
	// TerminationPath is the path of a file to write the starting time of this entrypoint.
	TerminationPath string
	// Waiter encapsulates waiting for files to exist.
	Waiter Waiter
	// Runner encapsulates running commands.
	Runner Runner
	// PostWriter encapsulates writing files when complete.
	PostWriter PostWriter
	// StepResults is the set of files that might contain step results
	StepResults []string
	// Results is the set of files that might contain task results
	Results []string
	// Timeout is an optional user-specified duration within which the Step must complete
	Timeout *time.Duration
	// BreakpointOnFailure helps determine if entrypoint execution needs to adapt debugging requirements
	BreakpointOnFailure bool
	// DebugBeforeStep helps the user attach to the container before execution
	DebugBeforeStep bool
	// OnError defines exiting behavior of the entrypoint
	// set it to "stopAndFail" to indicate the entrypoint to exit the taskRun if the container exits with non zero exit code
	// set it to "continue" to indicate the entrypoint to continue executing the rest of the steps irrespective of the container exit code
	OnError string
	// StepMetadataDir is the directory for a step where the step related metadata can be stored
	StepMetadataDir string
	// SpireWorkloadAPI connects to spire and obtains SVID based on taskrun
	SpireWorkloadAPI EntrypointerAPIClient
	// ResultsDirectory is the directory to find results, defaults to pipeline.DefaultResultPath
	ResultsDirectory string
	// ResultExtractionMethod is the method using which the controller extracts the results from the task pod.
	ResultExtractionMethod string
	// StepWhenExpressions is a list of when expressions used to decide if the step should be skipped
	StepWhenExpressions v1.StepWhenExpressions
	// ArtifactsDirectory is the directory to find artifacts, defaults to pipeline.ArtifactsDir
	ArtifactsDirectory string
}
// Waiter encapsulates waiting for files to exist.
type Waiter interface {
	// Wait blocks until the specified file exists or the context is done.
	Wait(ctx context.Context, file string, expectContent bool, breakpointOnFailure bool) error
}

// Runner encapsulates running commands.
type Runner interface {
	// Run executes the command with the given args under ctx.
	Run(ctx context.Context, args ...string) error
}

// PostWriter encapsulates writing a file when complete.
type PostWriter interface {
	// Write writes content to the given file path when complete.
	Write(file, content string)
}
// Go optionally waits for a file, runs the command, and writes a
// post file.
//
// Lifecycle: wait for WaitFiles → (optional) before-step debug → run the
// command under a cancellable/timeout context → record the outcome in the
// post/exit-code files → read results from disk → report everything via
// the termination message (written in the deferred function regardless of
// how this method exits).
func (e Entrypointer) Go() error {
	output := []result.RunResult{}
	defer func() {
		if wErr := termination.WriteMessage(e.TerminationPath, output); wErr != nil {
			log.Fatalf("Error while writing message: %s", wErr)
		}
	}()
	if err := os.MkdirAll(filepath.Join(e.StepMetadataDir, "results"), os.ModePerm); err != nil {
		return err
	}
	if err := os.MkdirAll(filepath.Join(e.StepMetadataDir, "artifacts"), os.ModePerm); err != nil {
		return err
	}
	for _, f := range e.WaitFiles {
		if err := e.Waiter.Wait(context.Background(), f, e.WaitFileContent, e.BreakpointOnFailure); err != nil {
			// An error happened while waiting, so we bail
			// *but* we write postfile to make next steps bail too.
			// In case of breakpoint on failure do not write post file.
			if !e.BreakpointOnFailure {
				e.WritePostFile(e.PostFile, err)
			}
			output = append(output, result.RunResult{
				Key:        "StartedAt",
				Value:      time.Now().Format(timeFormat),
				ResultType: result.InternalTektonResultType,
			})
			if errors.Is(err, ErrSkipPreviousStepFailed) {
				output = append(output, e.outputRunResult(TerminationReasonSkipped))
			}
			return err
		}
	}
	var err error
	if e.DebugBeforeStep {
		err = e.waitBeforeStepDebug()
	}
	output = append(output, result.RunResult{
		Key:        "StartedAt",
		Value:      time.Now().Format(timeFormat),
		ResultType: result.InternalTektonResultType,
	})
	if e.Timeout != nil && *e.Timeout < time.Duration(0) {
		err = errors.New("negative timeout specified")
	}
	ctx := context.Background()
	var cancel context.CancelFunc
	if err == nil {
		// Substitution failures are logged but do not abort the step.
		if err := e.applyStepResultSubstitutions(pipeline.StepsDir); err != nil {
			slog.Error("Error while substituting step results:", slog.Any("error", err))
		}
		if err := e.applyStepArtifactSubstitutions(pipeline.StepsDir); err != nil {
			slog.Error("Error while substituting step artifacts:", slog.Any("error", err))
		}
		ctx, cancel = context.WithCancel(ctx)
		if e.Timeout != nil && *e.Timeout > time.Duration(0) {
			ctx, cancel = context.WithTimeout(ctx, *e.Timeout)
		}
		defer cancel()
		// start a goroutine to listen for cancellation file
		go func() {
			if err := e.waitingCancellation(ctx, cancel); err != nil && (!IsContextCanceledError(err) && !IsContextDeadlineError(err)) {
				slog.Error("Error while waiting for cancellation", slog.Any("error", err))
			}
		}()
		allowExec, err1 := e.allowExec()
		switch {
		case err1 != nil:
			err = err1
		case allowExec:
			err = e.Runner.Run(ctx, e.Command...)
		default:
			slog.Info("Step was skipped due to when expressions were evaluated to false.")
			output = append(output, e.outputRunResult(TerminationReasonSkipped))
			e.WritePostFile(e.PostFile, nil)
			e.WriteExitCodeFile(e.StepMetadataDir, "0")
			return nil
		}
	}
	// Classify the outcome and write the post/exit-code files accordingly.
	var ee *exec.ExitError
	switch {
	case err != nil && errors.Is(err, errDebugBeforeStep):
		e.WritePostFile(e.PostFile, err)
	case err != nil && errors.Is(err, ErrContextCanceled):
		slog.Info("Step was canceling")
		output = append(output, e.outputRunResult(TerminationReasonCancelled))
		e.WritePostFile(e.PostFile, ErrContextCanceled)
		e.WriteExitCodeFile(e.StepMetadataDir, syscall.SIGKILL.String())
	case errors.Is(err, ErrContextDeadlineExceeded):
		e.WritePostFile(e.PostFile, err)
		output = append(output, e.outputRunResult(TerminationReasonTimeoutExceeded))
	case err != nil && e.BreakpointOnFailure:
		slog.Info("Skipping writing to PostFile")
	case e.OnError == ContinueOnError && errors.As(err, &ee):
		// with continue on error and an ExitError, write non-zero exit code and a post file
		exitCode := strconv.Itoa(ee.ExitCode())
		output = append(output, result.RunResult{
			Key:        "ExitCode",
			Value:      exitCode,
			ResultType: result.InternalTektonResultType,
		})
		e.WritePostFile(e.PostFile, nil)
		e.WriteExitCodeFile(e.StepMetadataDir, exitCode)
	case err == nil:
		// if err is nil, write zero exit code and a post file
		e.WritePostFile(e.PostFile, nil)
		e.WriteExitCodeFile(e.StepMetadataDir, "0")
	default:
		// for a step without continue on error and any error, write a post file with .err
		e.WritePostFile(e.PostFile, err)
	}
	// strings.Split(..) with an empty string returns an array that contains one element, an empty string.
	// This creates an error when trying to open the result folder as a file.
	if len(e.Results) >= 1 && e.Results[0] != "" {
		resultPath := pipeline.DefaultResultPath
		if e.ResultsDirectory != "" {
			resultPath = e.ResultsDirectory
		}
		if err := e.readResultsFromDisk(ctx, resultPath, result.TaskRunResultType); err != nil {
			// Fixed copy-paste: this failure is about reading task results,
			// not substituting step artifacts.
			slog.Error("Error while reading task results from disk:", slog.Any("error", err))
			return err
		}
	}
	if len(e.StepResults) >= 1 && e.StepResults[0] != "" {
		stepResultPath := filepath.Join(e.StepMetadataDir, "results")
		if e.ResultsDirectory != "" {
			stepResultPath = e.ResultsDirectory
		}
		if err := e.readResultsFromDisk(ctx, stepResultPath, result.StepResultType); err != nil {
			slog.Error("Error while reading step results from disk:", slog.Any("error", err))
			return err
		}
	}
	if e.ResultExtractionMethod == ResultExtractionMethodTerminationMessage {
		e.appendArtifactOutputs(&output)
	}
	return err
}
// readArtifacts loads the artifact file at fp and wraps its contents in a
// single RunResult of the given type. A missing file yields an empty slice
// rather than an error.
func readArtifacts(fp string, resultType result.ResultType) ([]result.RunResult, error) {
	contents, err := os.ReadFile(fp)
	switch {
	case os.IsNotExist(err):
		return []result.RunResult{}, nil
	case err != nil:
		return nil, err
	}
	return []result.RunResult{{Key: fp, Value: string(contents), ResultType: resultType}}, nil
}
// appendArtifactOutputs collects the step-level and task-level artifact
// provenance files and appends them to output for the termination message.
func (e Entrypointer) appendArtifactOutputs(output *[]result.RunResult) {
	// Step artifacts always live under the step's metadata directory.
	stepProvenance := filepath.Join(e.StepMetadataDir, "artifacts", "provenance.json")
	stepArtifacts, err := readArtifacts(stepProvenance, result.StepArtifactsResultType)
	if err != nil {
		log.Fatalf("Error while handling step artifacts: %s", err)
	}
	*output = append(*output, stepArtifacts...)

	// Task artifacts default to the shared directory but may be overridden.
	taskArtifactsDir := pipeline.ArtifactsDir
	if e.ArtifactsDirectory != "" {
		taskArtifactsDir = e.ArtifactsDirectory
	}
	taskProvenance := filepath.Join(taskArtifactsDir, "provenance.json")
	taskArtifacts, err := readArtifacts(taskProvenance, result.TaskRunArtifactsResultType)
	if err != nil {
		log.Fatalf("Error while handling task artifacts: %s", err)
	}
	*output = append(*output, taskArtifacts...)
}
// allowExec evaluates the step's CEL when-expressions and reports whether
// the step may run. Truthy CEL results are memoized per-expression in m,
// which is finally handed to AllowsExecution for the overall decision
// (which also covers the non-CEL when-expressions).
func (e Entrypointer) allowExec() (bool, error) {
	when := e.StepWhenExpressions
	m := map[string]bool{}
	for _, we := range when {
		if we.CEL == "" {
			// Non-CEL expressions are handled by AllowsExecution below.
			continue
		}
		b, ok := m[we.CEL]
		if ok && !b {
			// NOTE(review): only true results are stored in m below, so this
			// early-out branch appears unreachable — defensive only.
			return false, nil
		}
		env, err := cel.NewEnv()
		if err != nil {
			return false, err
		}
		ast, iss := env.Compile(we.CEL)
		if iss.Err() != nil {
			return false, iss.Err()
		}
		// Generate an evaluable instance of the Ast within the environment
		prg, err := env.Program(ast)
		if err != nil {
			return false, err
		}
		// Evaluate the CEL expression (no input variables are provided)
		out, _, err := prg.Eval(map[string]interface{}{})
		if err != nil {
			return false, err
		}
		// The expression must evaluate to a boolean.
		b, ok = out.Value().(bool)
		if !ok {
			return false, fmt.Errorf("the CEL expression %s is not evaluated to a boolean", we.CEL)
		}
		if !b {
			// err is nil here; a false expression simply blocks execution.
			return false, err
		}
		m[we.CEL] = true
	}
	return when.AllowsExecution(m), nil
}
// waitBeforeStepDebug pauses at the before-step breakpoint until the user
// signals a decision through the breakpoint post file; it returns
// errDebugBeforeStep when the wait fails (user chose to skip the step).
func (e Entrypointer) waitBeforeStepDebug() error {
	log.Println(`debug before step breakpoint has taken effect, waiting for user's decision:
1) continue, use cmd: /tekton/debug/scripts/debug-beforestep-continue
2) fail-continue, use cmd: /tekton/debug/scripts/debug-beforestep-fail-continue`)
	breakpointFile := e.PostFile + breakpointBeforeStepSuffix
	waitErr := e.Waiter.Wait(context.Background(), breakpointFile, false, false)
	if waitErr == nil {
		return nil
	}
	log.Println("error occurred while waiting for " + breakpointFile + " : " + errDebugBeforeStep.Error())
	return errDebugBeforeStep
}
// readResultsFromDisk reads the declared result files (task- or step-level,
// per resultType) from resultDir, optionally signs them, and pushes them to
// the termination message when that extraction method is configured.
// Missing result files are silently skipped.
func (e Entrypointer) readResultsFromDisk(ctx context.Context, resultDir string, resultType result.ResultType) error {
	output := []result.RunResult{}
	results := e.Results
	if resultType == result.StepResultType {
		results = e.StepResults
	}
	for _, resultFile := range results {
		if resultFile == "" {
			continue
		}
		fileContents, err := os.ReadFile(filepath.Join(resultDir, resultFile))
		if os.IsNotExist(err) {
			// if the file doesn't exist, ignore it
			continue
		} else if err != nil {
			return err
		}
		output = append(output, result.RunResult{
			Key:        resultFile,
			Value:      string(fileContents),
			ResultType: resultType,
		})
	}
	// Sign the collected results via SPIRE.
	// NOTE(review): signResults is defined elsewhere in this package —
	// confirm it is a no-op when SpireWorkloadAPI is nil.
	signed, err := signResults(ctx, e.SpireWorkloadAPI, output)
	if err != nil {
		return err
	}
	output = append(output, signed...)
	// push output to termination path
	if e.ResultExtractionMethod == ResultExtractionMethodTerminationMessage && len(output) != 0 {
		if err := termination.WriteMessage(e.TerminationPath, output); err != nil {
			return err
		}
	}
	return nil
}
// BreakpointExitCode reads the post file and returns the exit code it
// contains. The file holds a decimal exit code, optionally followed by a
// trailing newline.
func (e Entrypointer) BreakpointExitCode(breakpointExitPostFile string) (int, error) {
	exitCode, err := os.ReadFile(breakpointExitPostFile)
	if os.IsNotExist(err) {
		return 0, fmt.Errorf("breakpoint postfile %s not found", breakpointExitPostFile)
	}
	// Previously only not-found was handled and any other read error was
	// silently ignored, leaving strconv.Atoi to fail on empty content with a
	// confusing message; surface the underlying error instead.
	if err != nil {
		return 0, fmt.Errorf("reading breakpoint postfile %s: %w", breakpointExitPostFile, err)
	}
	strExitCode := strings.TrimSuffix(string(exitCode), "\n")
	log.Println("Breakpoint exiting with exit code " + strExitCode)
	return strconv.Atoi(strExitCode)
}
// WritePostFile writes the post file; when err is non-nil the file name
// gains a ".err" suffix so downstream steps can tell failure from success.
// An empty postFile name writes nothing.
func (e Entrypointer) WritePostFile(postFile string, err error) {
	if postFile == "" {
		return
	}
	if err != nil {
		postFile += ".err"
	}
	e.PostWriter.Write(postFile, "")
}
// WriteExitCodeFile records content as the step's exit code under stepPath.
func (e Entrypointer) WriteExitCodeFile(stepPath, content string) {
	e.PostWriter.Write(filepath.Join(stepPath, "exitCode"), content)
}
// waitingCancellation blocks until the downward-API cancellation file
// appears (with content), then cancels the step's context. Wait errors are
// returned unchanged without cancelling.
func (e Entrypointer) waitingCancellation(ctx context.Context, cancel context.CancelFunc) error {
	err := e.Waiter.Wait(ctx, DownwardMountCancelFile, true, false)
	if err != nil {
		return err
	}
	cancel()
	return nil
}
// CheckForBreakpointOnFailure, when the on-failure breakpoint is enabled,
// waits for the breakpoint exit post file to be written, reads the exit
// code from it, and terminates the process with that code via os.Exit.
func (e Entrypointer) CheckForBreakpointOnFailure() {
	if e.BreakpointOnFailure {
		log.Println(`debug onFailure breakpoint has taken effect, waiting for user's decision:
1) continue, use cmd: /tekton/debug/scripts/debug-continue
2) fail-continue, use cmd: /tekton/debug/scripts/debug-fail-continue`)
		breakpointExitPostFile := e.PostFile + breakpointExitSuffix
		if waitErr := e.Waiter.Wait(context.Background(), breakpointExitPostFile, false, false); waitErr != nil {
			log.Println("error occurred while waiting for " + breakpointExitPostFile + " : " + waitErr.Error())
		}
		// get exitcode from .breakpointexit
		exitCode, readErr := e.BreakpointExitCode(breakpointExitPostFile)
		// if readErr exists, the exitcode with default to 0 as we would like
		// to encourage to continue running the next steps in the taskRun
		if readErr != nil {
			log.Println("error occurred while reading breakpoint exit code : " + readErr.Error())
		}
		os.Exit(exitCode)
	}
}
// GetContainerName prefixes the input name with "step-"
func GetContainerName(name string) string {
return fmt.Sprintf("%s%s", stepPrefix, name)
}
// loadStepResult reads the step result file and returns the string, array
// or object result value parsed from its JSON contents.
func loadStepResult(stepDir string, stepName string, resultName string) (v1.ResultValue, error) {
	var value v1.ResultValue
	resultPath := getStepResultPath(stepDir, GetContainerName(stepName), resultName)
	contents, err := os.ReadFile(resultPath)
	if err != nil {
		return value, err
	}
	if err := value.UnmarshalJSON(contents); err != nil {
		return value, err
	}
	return value, nil
}
// getStepResultPath gets the path to the named result file of the given
// (container-named) step under stepDir.
func getStepResultPath(stepDir string, stepName string, resultName string) string {
	resultsDir := filepath.Join(stepDir, stepName, "results")
	return filepath.Join(resultsDir, resultName)
}
// findReplacement looks for a step-result reference in an input string of
// the form "$(<expr>)". It loads the result from the referenced previous
// step and returns the replacement: a plain string for string, object-key
// and array-indexed results, or a whole array for "[*]" references (the
// two return values are mutually exclusive).
func findReplacement(stepDir string, s string) (string, []string, error) {
	expr := strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")")
	ref, err := resultref.ParseStepExpression(expr)
	if err != nil {
		return "", nil, err
	}
	loaded, err := loadStepResult(stepDir, ref.ResourceName, ref.ResultName)
	if err != nil {
		return "", nil, err
	}
	switch ref.ResultType {
	case "object":
		if ref.ObjectKey != "" {
			return loaded.ObjectVal[ref.ObjectKey], []string{}, nil
		}
		// Object reference without a key substitutes nothing.
		return "", []string{}, nil
	case "array":
		if ref.ArrayIdx != nil {
			return loaded.ArrayVal[*ref.ArrayIdx], []string{}, nil
		}
		// "[*]": expand the entire array.
		return "", append([]string{}, loaded.ArrayVal...), nil
	// "string"
	default:
		return loaded.StringVal, []string{}, nil
	}
}
// replaceEnv performs replacements for step-result references found in the
// values of environment variables, updating the process environment in place.
func replaceEnv(stepDir string) error {
	for _, kv := range os.Environ() {
		name, value, _ := strings.Cut(kv, "=")
		replaced := value
		for _, match := range resultref.StepResultRegex.FindAllStringSubmatch(value, -1) {
			substitution, _, err := findReplacement(stepDir, match[0])
			if err != nil {
				return err
			}
			replaced = strings.ReplaceAll(replaced, match[0], substitution)
		}
		// Setenv error intentionally ignored, matching prior behavior.
		os.Setenv(name, replaced)
	}
	return nil
}
// replaceCommandAndArgs performs replacements for step results in e.Command.
// String results are substituted in place inside each argument; a whole-array
// reference ("[*]") must constitute the entire argument and expands into
// multiple arguments.
func replaceCommandAndArgs(command []string, stepDir string) ([]string, error) {
	var newCommand []string
	for _, c := range command {
		matches := resultref.StepResultRegex.FindAllStringSubmatch(c, -1)
		// newC starts as the single original argument; it is either edited
		// in place (string substitution) or replaced wholesale (array expansion).
		newC := []string{c}
		for _, m := range matches {
			replaceWithString, replaceWithArray, err := findReplacement(stepDir, m[0])
			if err != nil {
				return []string{}, fmt.Errorf("failed to find replacement for %s to replace %s", m[0], c)
			}
			// replaceWithString and replaceWithArray are mutually exclusive
			if len(replaceWithArray) > 0 {
				if c != m[0] {
					// it has to be exact in "$(steps.<step-name>.results.<result-name>[*])" format, without anything else in the original string
					return nil, errors.New("value must be in \"$(steps.<step-name>.results.<result-name>[*])\" format, when using array results")
				}
				newC = replaceWithArray
			} else {
				newC[0] = strings.ReplaceAll(newC[0], m[0], replaceWithString)
			}
		}
		newCommand = append(newCommand, newC...)
	}
	return newCommand, nil
}
// applyStepResultSubstitutions applies the runtime step-result substitutions
// to the environment, the when-expressions, and the command/args, mutating
// the Entrypointer in place.
func (e *Entrypointer) applyStepResultSubstitutions(stepDir string) error {
	// Environment variables first.
	if err := replaceEnv(stepDir); err != nil {
		return err
	}
	// Then the when-expressions.
	when, err := replaceWhen(stepDir, e.StepWhenExpressions)
	if err != nil {
		return err
	}
	e.StepWhenExpressions = when
	// Finally command and args.
	command, err := replaceCommandAndArgs(e.Command, stepDir)
	if err != nil {
		return err
	}
	e.Command = command
	return nil
}
// replaceWhen substitutes step-result references inside each when-expression's
// Values, Input and CEL fields, mutating and returning when. Whole-array
// references may only appear as an entire Values element and expand into
// multiple values; Input and CEL accept only string substitutions.
func replaceWhen(stepDir string, when v1.StepWhenExpressions) (v1.StepWhenExpressions, error) {
	for i, w := range when {
		var newValues []string
	flag:
		for _, v := range when[i].Values {
			matches := resultref.StepResultRegex.FindAllStringSubmatch(v, -1)
			newV := v
			for _, m := range matches {
				replaceWithString, replaceWithArray, err := findReplacement(stepDir, m[0])
				if err != nil {
					return v1.WhenExpressions{}, err
				}
				// replaceWithString and replaceWithArray are mutually exclusive
				if len(replaceWithArray) > 0 {
					if v != m[0] {
						// it has to be exact in "$(steps.<step-name>.results.<result-name>[*])" format, without anything else in the original string
						return nil, errors.New("value must be in \"$(steps.<step-name>.results.<result-name>[*])\" format, when using array results")
					}
					newValues = append(newValues, replaceWithArray...)
					// The element was fully consumed by the array expansion;
					// skip the per-string path and move to the next element.
					continue flag
				}
				newV = strings.ReplaceAll(newV, m[0], replaceWithString)
			}
			newValues = append(newValues, newV)
		}
		when[i].Values = newValues
		// Input: plain string substitution only.
		matches := resultref.StepResultRegex.FindAllStringSubmatch(w.Input, -1)
		v := when[i].Input
		for _, m := range matches {
			replaceWith, _, err := findReplacement(stepDir, m[0])
			if err != nil {
				return v1.StepWhenExpressions{}, err
			}
			v = strings.ReplaceAll(v, m[0], replaceWith)
		}
		when[i].Input = v
		// CEL: plain string substitution only.
		matches = resultref.StepResultRegex.FindAllStringSubmatch(w.CEL, -1)
		c := when[i].CEL
		for _, m := range matches {
			replaceWith, _, err := findReplacement(stepDir, m[0])
			if err != nil {
				return v1.StepWhenExpressions{}, err
			}
			c = strings.ReplaceAll(c, m[0], replaceWith)
		}
		when[i].CEL = c
	}
	return when, nil
}
// outputRunResult builds the internal RunResult recording why the step
// terminated (e.g. Skipped, Cancelled, TimeoutExceeded).
func (e Entrypointer) outputRunResult(terminationReason string) result.RunResult {
	reason := result.RunResult{
		Key:        "Reason",
		Value:      terminationReason,
		ResultType: result.InternalTektonResultType,
	}
	return reason
}
// getStepArtifactsPath gets the path to the provenance file that holds the
// artifacts of the named step container under stepDir.
func getStepArtifactsPath(stepDir string, containerName string) string {
	stepRoot := filepath.Join(stepDir, containerName)
	return filepath.Join(stepRoot, "artifacts", "provenance.json")
}
// loadStepArtifacts loads and parses the JSON artifacts file for the
// specified step container.
func loadStepArtifacts(stepDir string, containerName string) (v1.Artifacts, error) {
	var artifacts v1.Artifacts
	contents, err := os.ReadFile(getStepArtifactsPath(stepDir, containerName))
	if err != nil {
		return artifacts, err
	}
	if err := json.Unmarshal(contents, &artifacts); err != nil {
		return artifacts, err
	}
	return artifacts, nil
}
// getArtifactValues retrieves the values associated with a specified artifact
// reference. It parses the provided artifact template, loads the
// corresponding step's artifacts, and returns the JSON-encoded Values of the
// artifact whose Name equals the template's artifact name (empty when the
// template named none). An error is returned when no artifact matches.
func getArtifactValues(dir string, template string) (string, error) {
	parsed, err := parseArtifactTemplate(template)
	if err != nil {
		return "", err
	}
	artifacts, err := loadStepArtifacts(dir, parsed.ContainerName)
	if err != nil {
		return "", err
	}
	// Select the inputs or outputs list per the template's category.
	candidates := artifacts.Inputs
	if parsed.Type == "outputs" {
		candidates = artifacts.Outputs
	}
	for _, a := range candidates {
		if a.Name != parsed.ArtifactName {
			continue
		}
		encoded, err := json.Marshal(a.Values)
		if err != nil {
			return "", err
		}
		return string(encoded), nil
	}
	return "", fmt.Errorf("values for template %s not found", template)
}
// parseArtifactTemplate parses an artifact template string and extracts the
// container name, category (inputs/outputs) and optional artifact name into an
// ArtifactTemplate struct.
// The template must fully match the step-artifact reference format, e.g.
// "$(steps.<step-name>.outputs.<artifact-category-name>)".
func parseArtifactTemplate(template string) (ArtifactTemplate, error) {
	if template == "" {
		return ArtifactTemplate{}, errors.New("template is empty")
	}
	// The regex must match the entire template, not just a substring.
	if artifactref.StepArtifactRegex.FindString(template) != template {
		return ArtifactTemplate{}, fmt.Errorf("invalid artifact template %s", template)
	}
	trimmed := strings.TrimSuffix(strings.TrimPrefix(template, "$("), ")")
	parts := strings.Split(trimmed, ".")
	parsed := ArtifactTemplate{
		ContainerName: "step-" + parts[1],
		Type:          parts[2],
	}
	// A fourth segment names a specific artifact; otherwise ArtifactName stays empty.
	if len(parts) == 4 {
		parsed.ArtifactName = parts[3]
	}
	return parsed, nil
}
// ArtifactTemplate holds steps artifacts metadata parsed from step artifacts interpolation
type ArtifactTemplate struct {
	ContainerName string // step container name, including the "step-" prefix
	Type          string // inputs or outputs
	ArtifactName  string // optional artifact name; empty when the template omits it
}
// applyStepArtifactSubstitutions replaces artifact references within a step's command and environment variables with their corresponding values.
//
// This function is designed to handle artifact substitutions in a script file, inline command, or environment variables.
//
// Args:
//
//	stepDir: The directory of the executing step.
//
// Returns:
//
//	An error object if any issues occur during substitution.
func (e *Entrypointer) applyStepArtifactSubstitutions(stepDir string) error {
	// Script was re-written into a file, we need to read the file to and substitute the content
	// and re-write the command.
	// While param substitution cannot be used in Script from StepAction, allowing artifact substitution doesn't seem bad as
	// artifacts are unmarshalled, should be safe.
	if len(e.Command) == 1 && filepath.Dir(e.Command[0]) == filepath.Clean(ScriptDir) {
		dataBytes, err := os.ReadFile(e.Command[0])
		if err != nil {
			return err
		}
		fileContent := string(dataBytes)
		v, err := replaceValue(artifactref.StepArtifactRegex, fileContent, stepDir, getArtifactValues)
		if err != nil {
			return err
		}
		// Only rewrite the script (and re-point the command) when a substitution
		// actually changed the content.
		if v != fileContent {
			temp, err := writeToTempFile(v)
			if err != nil {
				return err
			}
			e.Command = []string{temp.Name()}
		}
	} else {
		var newCmd []string
		for _, c := range e.Command {
			v, err := replaceValue(artifactref.StepArtifactRegex, c, stepDir, getArtifactValues)
			if err != nil {
				return err
			}
			newCmd = append(newCmd, v)
		}
		e.Command = newCmd
	}
	// Substitute artifact references in the process environment.
	// FIX: the loop variable was previously named "e", shadowing the method
	// receiver; renamed to env for clarity (behavior unchanged).
	for _, env := range os.Environ() {
		pair := strings.SplitN(env, "=", 2)
		v, err := replaceValue(artifactref.StepArtifactRegex, pair[1], stepDir, getArtifactValues)
		if err != nil {
			return err
		}
		os.Setenv(pair[0], v)
	}
	return nil
}
func writeToTempFile(v string) (*os.File, error) {
tmp, err := os.CreateTemp("", "script-*")
if err != nil {
return nil, err
}
err = os.Chmod(tmp.Name(), 0o755)
if err != nil {
return nil, err
}
_, err = tmp.WriteString(v)
if err != nil {
return nil, err
}
err = tmp.Close()
if err != nil {
return nil, err
}
return tmp, nil
}
func replaceValue(regex *regexp.Regexp, src string, stepDir string, getValue func(string, string) (string, error)) (string, error) {
matches := regex.FindAllStringSubmatch(src, -1)
t := src
for _, m := range matches {
v, err := getValue(stepDir, m[0])
if err != nil {
return "", err
}
t = strings.ReplaceAll(t, m[0], v)
}
return t, nil
}
//go:build !disable_spire
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package entrypoint
import (
"context"
"github.com/tektoncd/pipeline/pkg/result"
"github.com/tektoncd/pipeline/pkg/spire"
)
// EntrypointerAPIClient defines the interface for SPIRE operations.
// It embeds the spire package's client interface so code in this package can
// accept a SPIRE client without referring to the spire package directly.
type EntrypointerAPIClient interface {
	spire.EntrypointerAPIClient
}
// signResults signs the given run results with the SPIRE-backed API client.
// A nil api is treated as "signing disabled": no results and no error.
func signResults(ctx context.Context, api EntrypointerAPIClient, results []result.RunResult) ([]result.RunResult, error) {
	if api != nil {
		return api.Sign(ctx, results)
	}
	return nil, nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package affinityassistant
import (
"context"
"fmt"
"github.com/tektoncd/pipeline/pkg/pod"
"github.com/tektoncd/pipeline/pkg/apis/config"
)
// AffinityAssistantBehavior describes how the affinity assistant operates,
// derived from the "coschedule" feature flag (see GetAffinityAssistantBehavior).
type AffinityAssistantBehavior string

const (
	// AffinityAssistantDisabled corresponds to the "disabled" coschedule value.
	AffinityAssistantDisabled = AffinityAssistantBehavior("AffinityAssistantDisabled")
	// AffinityAssistantPerWorkspace corresponds to the "workspaces" coschedule value.
	AffinityAssistantPerWorkspace = AffinityAssistantBehavior("AffinityAssistantPerWorkspace")
	// AffinityAssistantPerPipelineRun corresponds to the "pipelineruns" coschedule value.
	AffinityAssistantPerPipelineRun = AffinityAssistantBehavior("AffinityAssistantPerPipelineRun")
	// AffinityAssistantPerPipelineRunWithIsolation corresponds to the
	// "isolate-pipelinerun" coschedule value.
	AffinityAssistantPerPipelineRunWithIsolation = AffinityAssistantBehavior("AffinityAssistantPerPipelineRunWithIsolation")
)
// GetAffinityAssistantBehavior returns an AffinityAssistantBehavior based on the
// "coschedule" feature flag; an unrecognized flag value yields an error.
func GetAffinityAssistantBehavior(ctx context.Context) (AffinityAssistantBehavior, error) {
	coschedule := config.FromContextOrDefaults(ctx).FeatureFlags.Coschedule
	switch coschedule {
	case config.CoschedulePipelineRuns:
		return AffinityAssistantPerPipelineRun, nil
	case config.CoscheduleIsolatePipelineRun:
		return AffinityAssistantPerPipelineRunWithIsolation, nil
	case config.CoscheduleWorkspaces:
		return AffinityAssistantPerWorkspace, nil
	case config.CoscheduleDisabled:
		return AffinityAssistantDisabled, nil
	default:
		return "", fmt.Errorf("unknown affinity assistant coschedule: %v", coschedule)
	}
}
// ContainerConfig defines AffinityAssistant container configuration
type ContainerConfig struct {
	Image                 string                    // container image used for the affinity assistant
	SecurityContextConfig pod.SecurityContextConfig // security context applied to the assistant container
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package affinityassistant
import (
"context"
"github.com/tektoncd/pipeline/pkg/pod"
"github.com/tektoncd/pipeline/pkg/workspace"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewTransformer returns a pod.Transformer that adds pod affinity toward the
// affinity assistant when the pod's annotations name one.
func NewTransformer(_ context.Context, annotations map[string]string) pod.Transformer {
	return func(p *corev1.Pod) (*corev1.Pod, error) {
		// When the Affinity Assistant is disabled no name is annotated and the
		// pod is returned untouched.
		name := annotations[workspace.AnnotationAffinityAssistantName]
		if name == "" {
			return p, nil
		}
		if p.Spec.Affinity == nil {
			p.Spec.Affinity = &corev1.Affinity{}
		}
		mergeAffinityWithAffinityAssistant(p.Spec.Affinity, name)
		return p, nil
	}
}
// mergeAffinityWithAffinityAssistant appends a required pod-affinity term for
// the named affinity assistant to affinity, creating the PodAffinity struct
// when it is absent.
func mergeAffinityWithAffinityAssistant(affinity *corev1.Affinity, affinityAssistantName string) {
	if affinity.PodAffinity == nil {
		affinity.PodAffinity = &corev1.PodAffinity{}
	}
	term := podAffinityTermUsingAffinityAssistant(affinityAssistantName)
	terms := append(affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, *term)
	affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = terms
}
// podAffinityTermUsingAffinityAssistant achieves pod Affinity term for taskRun
// pods so that the taskRun is scheduled to the Node where the Affinity Assistant pod
// is scheduled.
func podAffinityTermUsingAffinityAssistant(affinityAssistantName string) *corev1.PodAffinityTerm {
	selector := &metav1.LabelSelector{
		MatchLabels: map[string]string{
			workspace.LabelInstance:  affinityAssistantName,
			workspace.LabelComponent: workspace.ComponentNameAffinityAssistant,
		},
	}
	return &corev1.PodAffinityTerm{
		LabelSelector: selector,
		// Co-locate on the same node as the assistant pod.
		TopologyKey: "kubernetes.io/hostname",
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compare
import (
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
// IsZero returns true if the resource quantity has a zero value.
func IsZero(q resource.Quantity) bool {
	// Go auto-addresses an addressable receiver for pointer-receiver methods,
	// so the previous explicit (&q).IsZero() was redundant.
	return q.IsZero()
}
// MaxRequest returns the largest resource request.
// A zero request is considered the smallest request.
func MaxRequest(quantities ...resource.Quantity) resource.Quantity {
	// largest starts at the zero quantity so any positive quantity wins.
	largest := resource.Quantity{}
	for _, quantity := range quantities {
		if quantity.Cmp(largest) > 0 {
			largest = quantity
		}
	}
	return largest
}
// MinLimit returns the smallest resource limit.
// A zero limit is considered higher than any other resource limit.
func MinLimit(quantities ...resource.Quantity) resource.Quantity {
	var smallest resource.Quantity
	for _, quantity := range quantities {
		switch {
		case smallest.IsZero():
			// Zero means "no limit yet"; take the first candidate as-is.
			smallest = quantity
		case quantity.Cmp(smallest) < 0:
			smallest = quantity
		}
	}
	return smallest
}
// ResourceQuantityCmp allows resource quantities to be compared in tests.
// Two quantities are considered equal when Cmp reports 0, regardless of the
// format/string representation they carry.
var ResourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
	return x.Cmp(y) == 0
})
// equateAlways reports any two values as equal; used to build cmp options
// that deliberately ignore content.
func equateAlways(_, _ interface{}) bool {
	return true
}
// EquateEmptyResourceList returns a comparison option that will equate resource lists
// if neither contains non-empty resource quantities.
func EquateEmptyResourceList() cmp.Option {
	bothEmpty := func(x, y corev1.ResourceList) bool {
		return IsEmpty(x) && IsEmpty(y)
	}
	return cmp.FilterValues(bothEmpty, cmp.Comparer(equateAlways))
}
// IsEmpty returns false if the ResourceList contains non-empty resource quantities.
// A nil or empty list, or a list of only zero quantities, counts as empty.
func IsEmpty(x corev1.ResourceList) bool {
	// Ranging over a nil/empty map simply does nothing, so no length check is needed.
	for _, q := range x {
		if !q.IsZero() {
			return false
		}
	}
	return true
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package limitrange
import (
"github.com/tektoncd/pipeline/pkg/internal/computeresources/compare"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
corev1listers "k8s.io/client-go/listers/core/v1"
)
// GetVirtualLimitRange returns a pointer to a single LimitRange representing the most restrictive
// requirements of all LimitRanges present in the namespace, or a nil pointer if there are no LimitRanges.
// This LimitRange meets the following constraints:
// - Its max is the smallest max of all the LimitRanges
// - Its min is the largest min of all the LimitRanges
// - Its maxLimitRequestRatio is the smallest maxLimitRequestRatio of all the LimitRanges
// - Its default is the smallest default of any of the LimitRanges that fits within the minimum and maximum
// - Its defaultRequest is the smallest defaultRequest of any of the LimitRanges that fits within the minimum and maximum
//
// This function isn't guaranteed to return a LimitRange with consistent constraints.
// For example, the minimum could be greater than the maximum.
func GetVirtualLimitRange(namespace string, lister corev1listers.LimitRangeLister) (*corev1.LimitRange, error) {
	limitRanges, err := lister.LimitRanges(namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}
	var limitRange *corev1.LimitRange
	switch {
	case len(limitRanges) == 0:
		// No LimitRange defined: return nil.
	case len(limitRanges) == 1:
		// Exactly one LimitRange defined: use it unchanged.
		limitRange = limitRanges[0]
	default:
		// Several LimitRanges defined: merge them into one virtual LimitRange,
		// keyed by limit type.
		limitRange = &corev1.LimitRange{}
		m := map[corev1.LimitType]corev1.LimitRangeItem{}
		for _, lr := range limitRanges {
			for _, item := range lr.Spec.Limits {
				_, exists := m[item.Type]
				if !exists {
					m[item.Type] = corev1.LimitRangeItem{
						Type:                 item.Type,
						Min:                  corev1.ResourceList{},
						Max:                  corev1.ResourceList{},
						Default:              corev1.ResourceList{},
						DefaultRequest:       corev1.ResourceList{},
						MaxLimitRequestRatio: corev1.ResourceList{},
					}
				}
				// Min: the largest min is the most restrictive.
				m[item.Type].Min[corev1.ResourceCPU] = compare.MaxRequest(m[item.Type].Min[corev1.ResourceCPU], item.Min[corev1.ResourceCPU])
				m[item.Type].Min[corev1.ResourceMemory] = compare.MaxRequest(m[item.Type].Min[corev1.ResourceMemory], item.Min[corev1.ResourceMemory])
				m[item.Type].Min[corev1.ResourceEphemeralStorage] = compare.MaxRequest(m[item.Type].Min[corev1.ResourceEphemeralStorage], item.Min[corev1.ResourceEphemeralStorage])
				// Max: the smallest max is the most restrictive.
				m[item.Type].Max[corev1.ResourceCPU] = compare.MinLimit(m[item.Type].Max[corev1.ResourceCPU], item.Max[corev1.ResourceCPU])
				m[item.Type].Max[corev1.ResourceMemory] = compare.MinLimit(m[item.Type].Max[corev1.ResourceMemory], item.Max[corev1.ResourceMemory])
				m[item.Type].Max[corev1.ResourceEphemeralStorage] = compare.MinLimit(m[item.Type].Max[corev1.ResourceEphemeralStorage], item.Max[corev1.ResourceEphemeralStorage])
				// MaxLimitRequestRatio: the smallest ratio is the most restrictive.
				m[item.Type].MaxLimitRequestRatio[corev1.ResourceCPU] = compare.MinLimit(m[item.Type].MaxLimitRequestRatio[corev1.ResourceCPU], item.MaxLimitRequestRatio[corev1.ResourceCPU])
				m[item.Type].MaxLimitRequestRatio[corev1.ResourceMemory] = compare.MinLimit(m[item.Type].MaxLimitRequestRatio[corev1.ResourceMemory], item.MaxLimitRequestRatio[corev1.ResourceMemory])
				m[item.Type].MaxLimitRequestRatio[corev1.ResourceEphemeralStorage] = compare.MinLimit(m[item.Type].MaxLimitRequestRatio[corev1.ResourceEphemeralStorage], item.MaxLimitRequestRatio[corev1.ResourceEphemeralStorage])
			}
		}
		// Handle Default and DefaultRequest in a second pass, once the virtual
		// min/max are fully computed, so candidates are clamped to the final bounds.
		for _, lr := range limitRanges {
			for _, item := range lr.Spec.Limits {
				// Default
				m[item.Type].Default[corev1.ResourceCPU] = minOfBetween(m[item.Type].Default[corev1.ResourceCPU], item.Default[corev1.ResourceCPU], m[item.Type].Min[corev1.ResourceCPU], m[item.Type].Max[corev1.ResourceCPU])
				m[item.Type].Default[corev1.ResourceMemory] = minOfBetween(m[item.Type].Default[corev1.ResourceMemory], item.Default[corev1.ResourceMemory], m[item.Type].Min[corev1.ResourceMemory], m[item.Type].Max[corev1.ResourceMemory])
				m[item.Type].Default[corev1.ResourceEphemeralStorage] = minOfBetween(m[item.Type].Default[corev1.ResourceEphemeralStorage], item.Default[corev1.ResourceEphemeralStorage], m[item.Type].Min[corev1.ResourceEphemeralStorage], m[item.Type].Max[corev1.ResourceEphemeralStorage])
				// DefaultRequest
				m[item.Type].DefaultRequest[corev1.ResourceCPU] = minOfBetween(m[item.Type].DefaultRequest[corev1.ResourceCPU], item.DefaultRequest[corev1.ResourceCPU], m[item.Type].Min[corev1.ResourceCPU], m[item.Type].Max[corev1.ResourceCPU])
				m[item.Type].DefaultRequest[corev1.ResourceMemory] = minOfBetween(m[item.Type].DefaultRequest[corev1.ResourceMemory], item.DefaultRequest[corev1.ResourceMemory], m[item.Type].Min[corev1.ResourceMemory], m[item.Type].Max[corev1.ResourceMemory])
				// BUG FIX: the ephemeral-storage defaultRequest was previously bounded by the
				// CPU min/max (copy-paste error); bound it by the ephemeral-storage min/max.
				m[item.Type].DefaultRequest[corev1.ResourceEphemeralStorage] = minOfBetween(m[item.Type].DefaultRequest[corev1.ResourceEphemeralStorage], item.DefaultRequest[corev1.ResourceEphemeralStorage], m[item.Type].Min[corev1.ResourceEphemeralStorage], m[item.Type].Max[corev1.ResourceEphemeralStorage])
			}
		}
		for _, v := range m {
			limitRange.Spec.Limits = append(limitRange.Spec.Limits, v)
		}
	}
	return limitRange, nil
}
// minOfBetween returns b when a is zero or greater than b, otherwise a —
// i.e. the smaller non-zero of a and b.
// NOTE(review): the min and max parameters are currently unused, so the
// "fits within the minimum and maximum" clamping described in the caller's
// documentation is not actually enforced here — confirm whether that is
// intentional before relying on it.
func minOfBetween(a, b, min, max resource.Quantity) resource.Quantity {
	if compare.IsZero(a) || (&a).Cmp(b) > 0 {
		return b
	}
	return a
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tasklevel
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
// ApplyTaskLevelComputeResources applies the task-level compute resource requirements to each Step.
// Requests are divided evenly across the steps; the task-level limits are applied to every step as-is.
func ApplyTaskLevelComputeResources(steps []v1.Step, computeResources *corev1.ResourceRequirements) {
	// Nothing to do when no task-level resources are specified at all.
	if computeResources == nil || (computeResources.Requests == nil && computeResources.Limits == nil) {
		return
	}
	averageRequests := computeAverageRequests(computeResources.Requests, len(steps))
	averageLimits := computeAverageRequests(computeResources.Limits, len(steps))
	for i := range steps {
		step := &steps[i]
		// if no requests are specified in step or task level, the limits are used to avoid
		// unnecessary higher requests by Kubernetes default behavior.
		if step.ComputeResources.Requests == nil && computeResources.Requests == nil {
			step.ComputeResources.Requests = averageLimits
		} else {
			step.ComputeResources.Requests = averageRequests
		}
		step.ComputeResources.Limits = computeResources.Limits
	}
}
// computeAverageRequests computes the average of the requests of all the steps.
// Memory and ephemeral-storage are divided on whole-unit values; every other
// resource (e.g. CPU) is divided on milli-values for finer granularity.
// Returns nil for an empty request list or a zero step count.
func computeAverageRequests(requests corev1.ResourceList, steps int) corev1.ResourceList {
	if steps == 0 || len(requests) == 0 {
		return nil
	}
	averaged := corev1.ResourceList{}
	for name, quantity := range requests {
		switch name {
		case corev1.ResourceMemory, corev1.ResourceEphemeralStorage:
			averaged[name] = *resource.NewQuantity(quantity.Value()/int64(steps), quantity.Format)
		default:
			averaged[name] = *resource.NewMilliQuantity(quantity.MilliValue()/int64(steps), quantity.Format)
		}
	}
	return averaged
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package computeresources
import (
"context"
"github.com/tektoncd/pipeline/pkg/internal/computeresources/compare"
"github.com/tektoncd/pipeline/pkg/internal/computeresources/limitrange"
"github.com/tektoncd/pipeline/pkg/pod"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
corev1listers "k8s.io/client-go/listers/core/v1"
)
// resourceNames lists the compute resources this package inspects and adjusts
// on each step container: CPU, memory and ephemeral storage.
var resourceNames = []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage}
// NewTransformer returns a pod.Transformer that will modify limits if needed
func NewTransformer(ctx context.Context, namespace string, lister corev1listers.LimitRangeLister) pod.Transformer {
	return func(p *corev1.Pod) (*corev1.Pod, error) {
		// Build the single "virtual" LimitRange for the namespace, then apply it.
		virtual, err := limitrange.GetVirtualLimitRange(namespace, lister)
		if err != nil {
			return p, err
		}
		return transformPodBasedOnLimitRange(p, virtual), nil
	}
}
// transformPodBasedOnLimitRange modifies the pod's containers' resource requirements to meet the constraints of the LimitRange.
// The only supported type of LimitRange is "Container".
// For any container:
//   - If the container has requests, they are set to the max of (requests, limitRange minimum).
//   - If the container doesn't have requests, they are set to the max of (limitRange minimum, "default"),
//     where "default" is the LimitRange defaultRequest (for init containers) or the LimitRange defaultRequest / # of app containers
//     (for app containers).
//   - If the container has limits, they are set to the min of (limits, limitRange maximum).
//   - If the container doesn't have limits, they are set to the min of (limitRange maximum, limitRange default).
func transformPodBasedOnLimitRange(p *corev1.Pod, limitRange *corev1.LimitRange) *corev1.Pod {
	// No LimitRange defined: nothing to transform, bail early.
	if limitRange == nil {
		return p
	}
	// The assumption here is that the min, max, default, ratio have already been
	// computed if there is multiple LimitRange to satisfy the most (if we can).
	// Count the step containers so the default request can be spread across them.
	nbStepContainers := 0
	for _, c := range p.Spec.Containers {
		if pod.IsContainerStep(c.Name) {
			nbStepContainers++
		}
	}
	// FIXME(#4230) maxLimitRequestRatio to support later
	defaults := getDefaultStepContainerRequest(limitRange, nbStepContainers)
	for i := range p.Spec.Containers {
		container := &p.Spec.Containers[i]
		if !pod.IsContainerStep(container.Name) {
			continue
		}
		if container.Resources.Requests == nil {
			container.Resources.Requests = defaults
			continue
		}
		// Fill in only the resources the container left unset.
		for _, name := range resourceNames {
			setRequests(name, container.Resources.Requests, defaults)
		}
	}
	return p
}
// setRequests copies src[name] into dst[name] when dst currently has no value
// (zero) for that resource and src has a non-zero one.
func setRequests(name corev1.ResourceName, dst, src corev1.ResourceList) {
	if !compare.IsZero(dst[name]) {
		return
	}
	if candidate := src[name]; !compare.IsZero(candidate) {
		dst[name] = candidate
	}
}
// getDefaultStepContainerRequest returns the default requests to use for each step container,
// determined by dividing the LimitRange default requests among the step containers, and
// applying the LimitRange minimum if necessary. Returns nil when there is nothing to apply
// or when there are no step containers at all.
func getDefaultStepContainerRequest(limitRange *corev1.LimitRange, nbContainers int) corev1.ResourceList {
	// FIX: guard against a pod with no step containers — the divisions below
	// previously panicked with a division by zero when nbContainers was 0.
	// With no step containers there is nothing to compute a per-step share for.
	if nbContainers <= 0 {
		return nil
	}
	// Support only Type Container to start with
	var r corev1.ResourceList = map[corev1.ResourceName]resource.Quantity{}
	for _, item := range limitRange.Spec.Limits {
		// Only support LimitTypeContainer
		if item.Type != corev1.LimitTypeContainer {
			continue
		}
		for _, name := range resourceNames {
			var defaultRequest resource.Quantity
			var min resource.Quantity
			request := r[name]
			if item.DefaultRequest != nil {
				defaultRequest = item.DefaultRequest[name]
			}
			if item.Min != nil {
				min = item.Min[name]
			}
			// Memory and ephemeral storage divide on whole units; everything else
			// (CPU) divides on milli-units for finer granularity.
			var result resource.Quantity
			if name == corev1.ResourceMemory || name == corev1.ResourceEphemeralStorage {
				result = compare.MaxRequest(request, *resource.NewQuantity(defaultRequest.Value()/int64(nbContainers), defaultRequest.Format), min)
			} else {
				result = compare.MaxRequest(request, *resource.NewMilliQuantity(defaultRequest.MilliValue()/int64(nbContainers), defaultRequest.Format), min)
			}
			// only set non-zero request values
			if !compare.IsZero(result) {
				r[name] = result
			}
		}
	}
	// return nil if the resource list is empty to avoid setting an empty defaultrequest
	if len(r) == 0 {
		return nil
	}
	return r
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultresourcerequirements
import (
"context"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/pod"
corev1 "k8s.io/api/core/v1"
)
// NewTransformer returns a pod.Transformer that will modify container resources if needed
func NewTransformer(ctx context.Context) pod.Transformer {
	// Resource limit/request values are taken from the config_defaults config map.
	defaults := config.FromContextOrDefaults(ctx).Defaults
	return func(p *corev1.Pod) (*corev1.Pod, error) {
		return updateResourceRequirements(defaults.DefaultContainerResourceRequirements, p), nil
	}
}
// updateResourceRequirements applies resource requirements from the config_defaults
// config map to any init container or container of the pod that has none of its own.
// Entries are applied in priority order: exact container name first, then
// "prefix-"-declared name prefixes, then the catch-all default key.
func updateResourceRequirements(resourceRequirementsMap map[string]corev1.ResourceRequirements, pod *corev1.Pod) *corev1.Pod {
	if len(resourceRequirementsMap) == 0 {
		return pod
	}
	// collect all the available container names from the resource requirement map
	// some of the container names: place-scripts, prepare, working-dir-initializer
	// some of the container names with prefix: prefix-scripts, prefix-sidecar-scripts
	containerNames := []string{}
	containerNamesWithPrefix := []string{}
	for containerName := range resourceRequirementsMap {
		// skip the default key
		if containerName == config.ResourceRequirementDefaultContainerKey {
			continue
		}
		if strings.HasPrefix(containerName, "prefix-") {
			containerNamesWithPrefix = append(containerNamesWithPrefix, containerName)
		} else {
			containerNames = append(containerNames, containerName)
		}
	}
	// exact-name matches
	for _, containerName := range containerNames {
		rr := resourceRequirementsMap[containerName]
		if rr.Size() == 0 {
			continue
		}
		name := containerName
		applyResourcesIfEmpty(pod, rr, func(c string) bool { return c == name })
	}
	// prefix matches: an entry "prefix-scripts" matches containers named "scripts-*"
	for _, containerPrefix := range containerNamesWithPrefix {
		rr := resourceRequirementsMap[containerPrefix]
		if rr.Size() == 0 {
			continue
		}
		// get the actual container name prefix: strip "prefix-" and append "-"
		prefix := strings.Replace(containerPrefix, "prefix-", "", 1) + "-"
		applyResourcesIfEmpty(pod, rr, func(c string) bool { return strings.HasPrefix(c, prefix) })
	}
	// catch-all default for any container still without resource requirements
	if rr, found := resourceRequirementsMap[config.ResourceRequirementDefaultContainerKey]; found && rr.Size() != 0 {
		applyResourcesIfEmpty(pod, rr, func(string) bool { return true })
	}
	return pod
}

// applyResourcesIfEmpty sets rr on every init container and container whose name
// satisfies match and whose resource requirements are currently empty.
func applyResourcesIfEmpty(pod *corev1.Pod, rr corev1.ResourceRequirements, match func(string) bool) {
	for i := range pod.Spec.InitContainers {
		if match(pod.Spec.InitContainers[i].Name) && pod.Spec.InitContainers[i].Resources.Size() == 0 {
			pod.Spec.InitContainers[i].Resources = rr
		}
	}
	for i := range pod.Spec.Containers {
		if match(pod.Spec.Containers[i].Name) && pod.Spec.Containers[i].Resources.Size() == 0 {
			pod.Spec.Containers[i].Resources = rr
		}
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolution
import (
"encoding/base64"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
corev1 "k8s.io/api/core/v1"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
// CreateResolutionRequestStatusWithData returns a ResolutionRequestStatus with the resolved content.
// The content is stored base64-encoded (strict standard encoding) in the Data field.
func CreateResolutionRequestStatusWithData(content []byte) *v1beta1.ResolutionRequestStatus {
	encoded := base64.StdEncoding.Strict().EncodeToString(content)
	status := v1beta1.ResolutionRequestStatus{
		Status: duckv1.Status{},
		ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
			Data: encoded,
		},
	}
	return &status
}
// CreateResolutionRequestFailureStatus returns a ResolutionRequestStatus with failure.
// The status carries a single Succeeded=False condition with the common
// resolution-failed reason.
func CreateResolutionRequestFailureStatus() *v1beta1.ResolutionRequestStatus {
	failed := apis.Condition{
		Type:   apis.ConditionSucceeded,
		Status: corev1.ConditionFalse,
		Reason: common.ReasonResolutionFailed,
	}
	return &v1beta1.ResolutionRequestStatus{
		Status: duckv1.Status{
			Conditions: duckv1.Conditions{failed},
		},
	}
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resultref
import (
"fmt"
"regexp"
"strconv"
"strings"
)
const (
	// resultExpressionFormat is the expected form of a task result reference.
	resultExpressionFormat = "tasks.<taskName>.results.<resultName>"
	// stepResultExpressionFormat is the expected form of a step result reference.
	stepResultExpressionFormat = "steps.<stepName>.results.<resultName>"
	// Result expressions of the form <resultName>.<attribute> will be treated as object results.
	// If a string result name contains a dot, brackets should be used to differentiate it from an object result.
	// https://github.com/tektoncd/community/blob/main/teps/0075-object-param-and-result-types.md#collisions-with-builtin-variable-replacement
	objectResultExpressionFormat     = "tasks.<taskName>.results.<objectResultName>.<individualAttribute>"
	objectStepResultExpressionFormat = "steps.<stepName>.results.<objectResultName>.<individualAttribute>"
	// ResultStepPart Constant used to define the "steps" part of a step result reference
	ResultStepPart = "steps"
	// ResultTaskPart Constant used to define the "tasks" part of a pipeline result reference
	ResultTaskPart = "tasks"
	// ResultFinallyPart Constant used to define the "finally" part of a pipeline result reference
	ResultFinallyPart = "finally"
	// ResultResultPart Constant used to define the "results" part of a pipeline result reference
	ResultResultPart = "results"
	// arrayIndexing will match all `[int]` and `[*]` for parseExpression
	arrayIndexing = `\[([0-9])*\*?\]`
	// stepResultUsagePattern matches any "$(steps.<name>.results.<name>)" usage (non-greedy inner segments)
	stepResultUsagePattern = `\$\(steps\..*?\.results\..*?\)`
)
// arrayIndexingRegex is used to match `[int]` and `[*]`
var arrayIndexingRegex = regexp.MustCompile(arrayIndexing)

// StepResultRegex compiles the regex pattern for the usage of step results.
var StepResultRegex = regexp.MustCompile(stepResultUsagePattern)
// LooksLikeResultRef attempts to check if the given string looks like it contains any
// result references. Returns true if it does, false otherwise.
func LooksLikeResultRef(expression string) bool {
	parts := strings.Split(expression, ".")
	// Needs at least tasks|finally.<name>.results.<name>.
	if len(parts) < 4 {
		return false
	}
	if parts[0] != ResultTaskPart && parts[0] != ResultFinallyPart {
		return false
	}
	return parts[2] == ResultResultPart
}
// looksLikeStepResultRef attempts to check if the given string looks like it contains any
// step result references. Returns true if it does, false otherwise.
func looksLikeStepResultRef(expression string) bool {
	parts := strings.Split(expression, ".")
	// Needs at least steps.<name>.results.<name>.
	if len(parts) < 4 {
		return false
	}
	return parts[0] == ResultStepPart && parts[2] == ResultResultPart
}
// ParsedResult captures the task/step name, result name, type,
// array idx (in case of an array result) and
// object key (in case of an object result).
// This is generated by parsing expressions that use
// $(tasks.taskName.results.resultName...) or $(steps.stepName.results.resultName...)
type ParsedResult struct {
	// ResourceName is the task or step name the result belongs to.
	ResourceName string
	// ResultName is the name of the referenced result.
	ResultName string
	// ResultType is "string", "array" or "object", inferred from the reference shape.
	ResultType string
	// ArrayIdx is the concrete index for array references like result[1]; nil otherwise.
	ArrayIdx *int
	// ObjectKey is the attribute name for object references like result.key; empty otherwise.
	ObjectKey string
}
// parseExpression parses "task name", "result name", "array index" (iff it's an array result) and "object key name" (iff it's an object result)
// 1. Reference string result
//   - Input: tasks.myTask.results.aStringResult
//   - Output: "myTask", "aStringResult", nil, "", nil
//
// 2. Reference Object value with key:
//   - Input: tasks.myTask.results.anObjectResult.key1
//   - Output: "myTask", "anObjectResult", nil, "key1", nil
//
// 3. Reference array elements with array indexing :
//   - Input: tasks.myTask.results.anArrayResult[1]
//   - Output: "myTask", "anArrayResult", 1, "", nil
//
// 4. Referencing whole array or object result:
//   - Input: tasks.myTask.results.Result[*]
//   - Output: "myTask", "Result", nil, "", nil
//
// Invalid Case:
//   - Input: tasks.myTask.results.resultName.foo.bar
//   - Output: "", "", nil, "", error
//
// TODO: may use regex for each type to handle possible reference formats
func parseExpression(substitutionExpression string) (ParsedResult, error) {
	if LooksLikeResultRef(substitutionExpression) || looksLikeStepResultRef(substitutionExpression) {
		subExpressions := strings.Split(substitutionExpression, ".")
		// For string result: tasks.<taskName>.results.<stringResultName>
		// For string step result: steps.<stepName>.results.<stringResultName>
		// For array result: tasks.<taskName>.results.<arrayResultName>[index]
		// For array step result: steps.<stepName>.results.<arrayResultName>[index]
		if len(subExpressions) == 4 {
			resultName, stringIdx := ParseResultName(subExpressions[3])
			if stringIdx != "" {
				if stringIdx == "*" {
					// Whole-array reference: no concrete index.
					pr := ParsedResult{
						ResourceName: subExpressions[1],
						ResultName:   resultName,
						ResultType:   "array",
					}
					return pr, nil
				}
				intIdx, err := strconv.Atoi(stringIdx)
				if err != nil {
					// BUG FIX: this error was previously discarded, silently
					// turning an unparseable/overflowing index into index 0.
					return ParsedResult{}, fmt.Errorf("invalid array index %q in expression %q: %w", stringIdx, substitutionExpression, err)
				}
				pr := ParsedResult{
					ResourceName: subExpressions[1],
					ResultName:   resultName,
					ResultType:   "array",
					ArrayIdx:     &intIdx,
				}
				return pr, nil
			}
			pr := ParsedResult{
				ResourceName: subExpressions[1],
				ResultName:   resultName,
				ResultType:   "string",
			}
			return pr, nil
		} else if len(subExpressions) == 5 {
			// For object type result: tasks.<taskName>.results.<objectResultName>.<individualAttribute>
			// For object type step result: steps.<stepName>.results.<objectResultName>.<individualAttribute>
			pr := ParsedResult{
				ResourceName: subExpressions[1],
				ResultName:   subExpressions[3],
				ResultType:   "object",
				ObjectKey:    subExpressions[4],
			}
			return pr, nil
		}
	}
	return ParsedResult{}, fmt.Errorf("must be one of the form 1). %q; 2). %q; 3). %q; 4). %q", resultExpressionFormat, objectResultExpressionFormat, stepResultExpressionFormat, objectStepResultExpressionFormat)
}
// ParseTaskExpression parses the input string and searches for the use of task result usage.
func ParseTaskExpression(substitutionExpression string) (ParsedResult, error) {
	if !LooksLikeResultRef(substitutionExpression) {
		return ParsedResult{}, fmt.Errorf("must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat)
	}
	return parseExpression(substitutionExpression)
}
// ParseStepExpression parses the input string and searches for the use of step result usage.
func ParseStepExpression(substitutionExpression string) (ParsedResult, error) {
	if !looksLikeStepResultRef(substitutionExpression) {
		return ParsedResult{}, fmt.Errorf("must be one of the form 1). %q; 2). %q", stepResultExpressionFormat, objectStepResultExpressionFormat)
	}
	return parseExpression(substitutionExpression)
}
// ParseResultName parse the input string to extract resultName and result index.
// Array indexing:
// Input: anArrayResult[1]
// Output: anArrayResult, "1"
// Array star reference:
// Input: anArrayResult[*]
// Output: anArrayResult, "*"
func ParseResultName(resultName string) (string, string) {
	// Pull out the bracketed suffix (if any) and strip the brackets.
	match := arrayIndexingRegex.FindString(resultName)
	idx := strings.TrimSuffix(strings.TrimPrefix(match, "["), "]")
	// The bare name is the input with the indexing expression removed.
	name := arrayIndexingRegex.ReplaceAllString(resultName, "")
	return name, idx
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package list
import "fmt"
// IsSame will return an error indicating if there are extra or missing strings
// between the required and provided strings, or will return no error if the two
// contain the same values.
func IsSame(required, provided []string) error {
	if missing := DiffLeft(required, provided); len(missing) > 0 {
		return fmt.Errorf("didn't provide required values: %s", missing)
	}
	if extra := DiffLeft(provided, required); len(extra) > 0 {
		return fmt.Errorf("provided extra values: %s", extra)
	}
	return nil
}
// DiffLeft will return all strings which are in the left slice of strings but
// not in the right.
func DiffLeft(left, right []string) []string {
extra := []string{}
for _, s := range left {
found := false
for _, s2 := range right {
if s == s2 {
found = true
}
}
if !found {
extra = append(extra, s)
}
}
return extra
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package names
import (
"fmt"
"hash/fnv"
"regexp"
"strconv"
"strings"
utilrand "k8s.io/apimachinery/pkg/util/rand"
)
// NameGenerator generates names for objects. Some backends may have more information
// available to guide selection of new names and this interface hides those details.
type NameGenerator interface {
	// RestrictLengthWithRandomSuffix generates a valid name from the base name, adding a random suffix to
	// the base. If base is valid, the returned name must also be valid. The generator is
	// responsible for knowing the maximum valid name length.
	RestrictLengthWithRandomSuffix(base string) string

	// RestrictLength generates a valid name from the name of a step specified in a Task,
	// shortening it to the maximum valid name length if needed.
	RestrictLength(base string) string
}
// simpleNameGenerator generates random names.
type simpleNameGenerator struct{}

// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics
// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes
// name (63 characters)
var SimpleNameGenerator NameGenerator = simpleNameGenerator{}

const (
	// TODO: make this flexible for non-core resources with alternate naming rules.
	// maxNameLength is the Kubernetes object-name limit.
	maxNameLength = 63
	// randomLength is the number of random suffix characters appended.
	randomLength = 5
	// maxGeneratedNameLength leaves room for "-" plus the random suffix.
	maxGeneratedNameLength = maxNameLength - randomLength - 1
)
// RestrictLengthWithRandomSuffix takes a base name and returns a potentially shortened version of that name with
// a random suffix, with the whole string no longer than 63 characters.
func (simpleNameGenerator) RestrictLengthWithRandomSuffix(base string) string {
	trimmed := base
	// Leave room for the "-" separator and the random suffix.
	if len(trimmed) > maxGeneratedNameLength {
		trimmed = trimmed[:maxGeneratedNameLength]
	}
	return trimmed + "-" + utilrand.String(randomLength)
}
// alphaNumericRE matches names that end (and consist) of alphanumerics only;
// used below to strip trailing punctuation left by truncation.
var alphaNumericRE = regexp.MustCompile(`^[a-zA-Z0-9]+$`)

// RestrictLength takes a base name and returns a potentially shortened version of that name, no longer than 63 characters.
// Trailing non-alphanumeric characters left by the truncation are stripped.
func (simpleNameGenerator) RestrictLength(base string) string {
	if len(base) > maxNameLength {
		base = base[:maxNameLength]
	}
	// BUG FIX: guard against walking off the front of a name consisting
	// entirely of non-alphanumerics, which previously caused an
	// out-of-range slice panic on base[len(base)-1:].
	for len(base) > 0 && !alphaNumericRE.MatchString(base[len(base)-1:]) {
		base = base[:len(base)-1]
	}
	return base
}
// GenerateHashedName creates a unique name with a hashed suffix.
// The suffix is the FNV-1a 32-bit hash of name in hex, truncated or
// zero-padded (on the right) to exactly hashedLength characters.
func GenerateHashedName(prefix, name string, hashedLength int) string {
	if hashedLength <= 0 {
		// Fall back to the default suffix length.
		hashedLength = randomLength
	}
	hasher := fnv.New32a()
	// fnv hashes never return a write error.
	hasher.Write([]byte(name))
	suffix := strconv.FormatUint(uint64(hasher.Sum32()), 16)
	switch {
	case len(suffix) > hashedLength:
		suffix = suffix[:hashedLength]
	case len(suffix) < hashedLength:
		suffix += strings.Repeat("0", hashedLength-len(suffix))
	}
	return prefix + "-" + suffix
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
_ "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/fake" // Make sure the fake pipelinerun informer is setup
"github.com/tektoncd/pipeline/pkg/pipelinerunmetrics"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
)
// init registers the fake metrics recorder client and informer with knative's
// fake injection framework so tests get the same wiring as the real binary.
func init() {
	injection.Fake.RegisterClient(func(ctx context.Context, _ *rest.Config) context.Context { return pipelinerunmetrics.WithClient(ctx) })
	injection.Fake.RegisterInformer(pipelinerunmetrics.WithInformer)
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerunmetrics
import (
"context"
pipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun"
listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
"k8s.io/client-go/rest"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
// init registers the metrics recorder client and informer with knative's
// default injection framework; they are resolved when the shared main starts.
func init() {
	injection.Default.RegisterClient(func(ctx context.Context, _ *rest.Config) context.Context { return WithClient(ctx) })
	injection.Default.RegisterInformer(WithInformer)
}
// RecorderKey is used for associating the Recorder inside the context.Context.
type RecorderKey struct{}
// WithClient adds a metrics recorder to the given context.
func WithClient(ctx context.Context) context.Context {
	recorder, err := NewRecorder(ctx)
	if err != nil {
		// Metrics are best-effort: log the failure and continue with
		// whatever recorder (possibly uninitialized) we received.
		logging.FromContext(ctx).Errorf("Failed to create pipelinerun metrics recorder %v", err)
	}
	return context.WithValue(ctx, RecorderKey{}, recorder)
}
// Get extracts the pipelinerunmetrics.Recorder from the context.
// Panics (via the context logger) if no recorder was attached.
func Get(ctx context.Context) *Recorder {
	value := ctx.Value(RecorderKey{})
	if value == nil {
		logging.FromContext(ctx).Panic("Unable to fetch *pipelinerunmetrics.Recorder from context.")
	}
	return value.(*Recorder)
}
// InformerKey is used for associating the Informer inside the context.Context.
type InformerKey struct{}
// WithInformer returns the given context, and a configured informer that
// periodically reports running-PipelineRun metrics.
func WithInformer(ctx context.Context) (context.Context, controller.Informer) {
	informer := &recorderInformer{
		ctx:     ctx,
		metrics: Get(ctx),
		lister:  pipelineruninformer.Get(ctx).Lister(),
	}
	return ctx, informer
}
// recorderInformer adapts the metrics Recorder to knative's controller.Informer
// lifecycle so metric reporting starts and stops with the controller.
type recorderInformer struct {
	// ctx is the parent context for the reporting loop.
	ctx context.Context
	// metrics is the recorder whose ReportRunningPipelineRuns loop is run.
	metrics *Recorder
	// lister provides the PipelineRuns to count.
	lister listers.PipelineRunLister
}

// Compile-time check that recorderInformer satisfies controller.Informer.
var _ controller.Informer = (*recorderInformer)(nil)
// Run starts the recorder informer in a goroutine.
func (ri *recorderInformer) Run(stopCh <-chan struct{}) {
	// Derive a context that is cancelled when stopCh closes, so the
	// reporting loop shuts down with the informer.
	ctx, cancel := context.WithCancel(ri.ctx)
	go func() {
		defer cancel()
		<-stopCh
	}()
	go ri.metrics.ReportRunningPipelineRuns(ctx, ri.lister)
}
// HasSynced returns whether the informer has synced, which in this case will
// always be true: the recorder has no caches to warm before reporting.
func (ri *recorderInformer) HasSynced() bool {
	return true
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerunmetrics
import (
"context"
"encoding/hex"
"errors"
"fmt"
"sync"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.uber.org/zap"
"golang.org/x/crypto/blake2b"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/labels"
"knative.dev/pkg/apis"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
)
// Aggregation levels for the running-PipelineRuns metric; these mirror the
// values accepted by the metrics ConfigMap's RunningPipelinerunLevel field.
const (
	runningPRLevelPipelinerun = "pipelinerun"
	runningPRLevelPipeline    = "pipeline"
	runningPRLevelNamespace   = "namespace"
	// runningPRLevelCluster (empty string) aggregates across the whole cluster.
	runningPRLevelCluster = ""
)
// OpenCensus tag keys and measures for PipelineRun metrics. The *View values
// are populated by viewRegister according to the active metrics config.
var (
	pipelinerunTag = tag.MustNewKey("pipelinerun")
	pipelineTag    = tag.MustNewKey("pipeline")
	namespaceTag   = tag.MustNewKey("namespace")
	statusTag      = tag.MustNewKey("status")
	reasonTag      = tag.MustNewKey("reason")

	prDuration = stats.Float64(
		"pipelinerun_duration_seconds",
		"The pipelinerun execution time in seconds",
		stats.UnitDimensionless)
	prDurationView *view.View

	prTotal = stats.Float64("pipelinerun_total",
		"Number of pipelineruns",
		stats.UnitDimensionless)
	prTotalView *view.View

	runningPRs = stats.Float64("running_pipelineruns",
		"Number of pipelineruns executing currently",
		stats.UnitDimensionless)
	runningPRsView *view.View

	runningPRsWaitingOnPipelineResolution = stats.Float64("running_pipelineruns_waiting_on_pipeline_resolution",
		"Number of pipelineruns executing currently that are waiting on resolution requests for their pipeline references.",
		stats.UnitDimensionless)
	runningPRsWaitingOnPipelineResolutionView *view.View

	runningPRsWaitingOnTaskResolution = stats.Float64("running_pipelineruns_waiting_on_task_resolution",
		"Number of pipelineruns executing currently that are waiting on resolution requests for the task references of their taskrun children.",
		stats.UnitDimensionless)
	runningPRsWaitingOnTaskResolutionView *view.View
)
const (
	// ReasonCancelled indicates that a PipelineRun was cancelled.
	// Aliased for backwards compatibility; additional reasons should not be added here.
	ReasonCancelled = v1.PipelineRunReasonCancelled

	// anonymous is the pipeline tag value used when no pipeline name is known
	// (e.g. inlined pipeline specs).
	anonymous = "anonymous"
)
// Recorder holds keys for Tekton metrics
type Recorder struct {
	// mutex guards all mutable fields below; must not be copied.
	mutex       sync.Mutex
	// initialized is false when view registration failed; recording is skipped.
	initialized bool
	// cfg is the active metrics configuration.
	cfg *config.Metrics

	// insertTag builds the pipeline/pipelinerun tag mutators appropriate
	// for the configured aggregation level.
	insertTag func(pipeline,
		pipelinerun string) []tag.Mutator

	// ReportingPeriod is the interval between running-PipelineRun reports.
	ReportingPeriod time.Duration

	// hash fingerprints cfg so updateConfig can detect real changes.
	hash string
}
// We cannot register the view multiple times, so NewRecorder lazily
// initializes this singleton and returns the same recorder across any
// subsequent invocations.
var (
	once           sync.Once
	r              *Recorder
	errRegistering error
)
// NewRecorder creates a new metrics recorder instance
// to log the PipelineRun related metrics
// The first call performs view registration; every later call returns the
// same singleton (and the registration error, if any, from that first call).
func NewRecorder(ctx context.Context) (*Recorder, error) {
	once.Do(func() {
		r = &Recorder{
			initialized: true,

			// Default to 30s intervals.
			ReportingPeriod: 30 * time.Second,
		}

		cfg := config.FromContextOrDefaults(ctx)
		r.cfg = cfg.Metrics
		// viewRegister reads the package-level r, so r must be set first.
		errRegistering = viewRegister(cfg.Metrics)
		if errRegistering != nil {
			// Leave the recorder in place but mark it unusable.
			r.initialized = false
			return
		}
	})

	return r, errRegistering
}
// viewRegister (re)builds and registers the OpenCensus views for the given
// metrics configuration. It reads and mutates the package-level Recorder r,
// which must have been created (NewRecorder) before this is called.
func viewRegister(cfg *config.Metrics) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	// Choose the duration/count tag keys and the matching tag-insertion
	// helper for the configured pipelinerun aggregation level.
	var prunTag []tag.Key
	switch cfg.PipelinerunLevel {
	case config.PipelinerunLevelAtPipelinerun:
		prunTag = []tag.Key{pipelinerunTag, pipelineTag}
		r.insertTag = pipelinerunInsertTag
	case config.PipelinerunLevelAtPipeline:
		prunTag = []tag.Key{pipelineTag}
		r.insertTag = pipelineInsertTag
	case config.PipelinerunLevelAtNS:
		prunTag = []tag.Key{}
		r.insertTag = nilInsertTag
	default:
		return errors.New("invalid config for PipelinerunLevel: " + cfg.PipelinerunLevel)
	}

	// The running-PipelineRuns gauge has its own (possibly coarser) level;
	// unknown values silently fall back to cluster-wide aggregation.
	var runningPRTag []tag.Key
	switch cfg.RunningPipelinerunLevel {
	case config.PipelinerunLevelAtPipelinerun:
		runningPRTag = []tag.Key{pipelinerunTag, pipelineTag, namespaceTag}
	case config.PipelinerunLevelAtPipeline:
		runningPRTag = []tag.Key{pipelineTag, namespaceTag}
	case config.PipelinerunLevelAtNS:
		runningPRTag = []tag.Key{namespaceTag}
	default:
		runningPRTag = []tag.Key{}
	}

	distribution := view.Distribution(10, 30, 60, 300, 900, 1800, 3600, 5400, 10800, 21600, 43200, 86400)

	if cfg.PipelinerunLevel == config.PipelinerunLevelAtPipelinerun {
		distribution = view.LastValue()
	} else {
		switch cfg.DurationPipelinerunType {
		case config.DurationTaskrunTypeHistogram:
			// Keep the histogram distribution chosen above.
		case config.DurationTaskrunTypeLastValue:
			distribution = view.LastValue()
		default:
			// BUG FIX: the error previously reported cfg.DurationTaskrunType
			// even though this switch inspects cfg.DurationPipelinerunType.
			return errors.New("invalid config for DurationPipelinerunType: " + cfg.DurationPipelinerunType)
		}
	}

	if cfg.CountWithReason {
		prunTag = append(prunTag, reasonTag)
	}

	prDurationView = &view.View{
		Description: prDuration.Description(),
		Measure:     prDuration,
		Aggregation: distribution,
		TagKeys:     append([]tag.Key{statusTag, namespaceTag}, prunTag...),
	}

	prTotalView = &view.View{
		Description: prTotal.Description(),
		Measure:     prTotal,
		Aggregation: view.Count(),
		TagKeys:     []tag.Key{statusTag},
	}
	runningPRsView = &view.View{
		Description: runningPRs.Description(),
		Measure:     runningPRs,
		Aggregation: view.LastValue(),
		TagKeys:     runningPRTag,
	}
	runningPRsWaitingOnPipelineResolutionView = &view.View{
		Description: runningPRsWaitingOnPipelineResolution.Description(),
		Measure:     runningPRsWaitingOnPipelineResolution,
		Aggregation: view.LastValue(),
	}
	runningPRsWaitingOnTaskResolutionView = &view.View{
		Description: runningPRsWaitingOnTaskResolution.Description(),
		Measure:     runningPRsWaitingOnTaskResolution,
		Aggregation: view.LastValue(),
	}

	return view.Register(
		prDurationView,
		prTotalView,
		runningPRsView,
		runningPRsWaitingOnPipelineResolutionView,
		runningPRsWaitingOnTaskResolutionView,
	)
}
// viewUnregister removes every view registered by viewRegister so they can be
// re-registered against a new configuration.
func viewUnregister() {
	view.Unregister(
		prDurationView,
		prTotalView,
		runningPRsView,
		runningPRsWaitingOnPipelineResolutionView,
		runningPRsWaitingOnTaskResolutionView,
	)
}
// OnStore returns a function that checks if metrics are configured for a config.Store, and registers it if so
func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string,
	value interface{}) {
	return func(name string, value interface{}) {
		// Only react to updates of the metrics ConfigMap.
		if name != config.GetMetricsConfigName() {
			return
		}
		cfg, ok := value.(*config.Metrics)
		if !ok {
			logger.Error("Failed to do type insertion for extracting metrics config")
			return
		}
		// Skip re-registration when the config is unchanged.
		if !r.updateConfig(cfg) {
			return
		}
		// Update metrics according to configuration
		viewUnregister()
		if err := viewRegister(cfg); err != nil {
			logger.Errorf("Failed to register View %v ", err)
		}
	}
}
// pipelinerunInsertTag tags metrics with both pipeline and pipelinerun names
// (pipelinerun-level aggregation).
func pipelinerunInsertTag(pipeline, pipelinerun string) []tag.Mutator {
	mutators := []tag.Mutator{tag.Insert(pipelineTag, pipeline)}
	return append(mutators, tag.Insert(pipelinerunTag, pipelinerun))
}
// pipelineInsertTag tags metrics with only the pipeline name
// (pipeline-level aggregation); the pipelinerun name is ignored.
func pipelineInsertTag(pipeline, _ string) []tag.Mutator {
	return []tag.Mutator{tag.Insert(pipelineTag, pipeline)}
}
// nilInsertTag adds no tags (namespace-level aggregation).
func nilInsertTag(_, _ string) []tag.Mutator {
	return []tag.Mutator{}
}
// getPipelineTagName returns the value for the "pipeline" metric tag: the
// referenced Pipeline's name, the pipeline label, or "anonymous" when the
// PipelineRun carries an inlined spec or no usable name.
func getPipelineTagName(pr *v1.PipelineRun) string {
	if ref := pr.Spec.PipelineRef; ref != nil && ref.Name != "" {
		return ref.Name
	}
	if pr.Spec.PipelineSpec != nil {
		// Inlined pipeline spec: there is no pipeline name to report.
		return anonymous
	}
	// Indexing a nil label map is safe and yields "".
	if label := pr.Labels[pipeline.PipelineLabelKey]; label != "" {
		return label
	}
	return anonymous
}
// updateConfig stores the new metrics config and reports whether it differs
// from the active one, comparing blake2b fingerprints of the config contents.
func (r *Recorder) updateConfig(cfg *config.Metrics) bool {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	var newHash string
	if cfg != nil {
		sum := blake2b.Sum256([]byte(fmt.Sprintf("%v", *cfg)))
		newHash = hex.EncodeToString(sum[:])
	}
	if newHash == r.hash {
		// Identical config: nothing to re-register.
		return false
	}
	r.cfg = cfg
	r.hash = newHash
	return true
}
// DurationAndCount logs the duration of PipelineRun execution and
// count for number of PipelineRuns succeed or failed
// returns an error if it fails to log the metrics
func (r *Recorder) DurationAndCount(pr *v1.PipelineRun, beforeCondition *apis.Condition) error {
	if !r.initialized {
		return fmt.Errorf("ignoring the metrics recording for %s , failed to initialize the metrics recorder", pr.Name)
	}

	afterCondition := pr.Status.GetCondition(apis.ConditionSucceeded)
	// To avoid recount
	if equality.Semantic.DeepEqual(beforeCondition, afterCondition) {
		return nil
	}
	// BUG FIX: if the Succeeded condition is absent (while beforeCondition is
	// set), the code below previously dereferenced a nil condition and
	// panicked. There is nothing meaningful to record in that case.
	if afterCondition == nil {
		return nil
	}

	r.mutex.Lock()
	defer r.mutex.Unlock()

	// Prefer the completed duration; fall back to elapsed-so-far, or zero
	// when the run never started.
	duration := time.Duration(0)
	if pr.Status.StartTime != nil {
		duration = time.Since(pr.Status.StartTime.Time)
		if pr.Status.CompletionTime != nil {
			duration = pr.Status.CompletionTime.Sub(pr.Status.StartTime.Time)
		}
	}

	// Reuse afterCondition rather than fetching the condition a second time.
	status := "success"
	if afterCondition.Status == corev1.ConditionFalse {
		status = "failed"
		if afterCondition.Reason == v1.PipelineRunReasonCancelled.String() {
			status = "cancelled"
		}
	}
	reason := afterCondition.Reason

	pipelineName := getPipelineTagName(pr)
	ctx, err := tag.New(
		context.Background(),
		append([]tag.Mutator{
			tag.Insert(namespaceTag, pr.Namespace),
			tag.Insert(statusTag, status), tag.Insert(reasonTag, reason),
		}, r.insertTag(pipelineName, pr.Name)...)...)
	if err != nil {
		return err
	}

	metrics.Record(ctx, prDuration.M(duration.Seconds()))
	metrics.Record(ctx, prTotal.M(1))
	return nil
}
// RunningPipelineRuns logs the number of PipelineRuns running right now
// returns an error if it fails to log the metrics
func (r *Recorder) RunningPipelineRuns(lister listers.PipelineRunLister) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	if !r.initialized {
		return errors.New("ignoring the metrics recording, failed to initialize the metrics recorder")
	}

	prs, err := lister.List(labels.Everything())
	if err != nil {
		return fmt.Errorf("failed to list pipelineruns while generating metrics : %w", err)
	}

	var runningPipelineRuns int
	var trsWaitResolvingTaskRef int
	var prsWaitResolvingPipelineRef int
	countMap := map[string]int{}

	for _, pr := range prs {
		pipelineName := getPipelineTagName(pr)
		pipelineRunKey := ""
		mutators := []tag.Mutator{
			tag.Insert(namespaceTag, pr.Namespace),
			tag.Insert(pipelineTag, pipelineName),
			tag.Insert(pipelinerunTag, pr.Name)}
		if r.cfg != nil {
			// Build the aggregation key for the configured level; the
			// fallthroughs accumulate the finer-grained components.
			switch r.cfg.RunningPipelinerunLevel {
			case runningPRLevelPipelinerun:
				pipelineRunKey = pipelineRunKey + "#" + pr.Name
				fallthrough
			case runningPRLevelPipeline:
				pipelineRunKey = pipelineRunKey + "#" + pipelineName
				fallthrough
			case runningPRLevelNamespace:
				pipelineRunKey = pipelineRunKey + "#" + pr.Namespace
			case runningPRLevelCluster:
			default:
				return fmt.Errorf("RunningPipelineRunLevel value \"%s\" is not valid ", r.cfg.RunningPipelinerunLevel)
			}
		}
		prCtx, prErr := tag.New(context.Background(), mutators...)
		if prErr != nil {
			// BUG FIX: this previously returned the outer `err`, which is nil
			// at this point, silently swallowing the tag-construction failure.
			return prErr
		}
		if !pr.IsDone() && !pr.IsPending() {
			countMap[pipelineRunKey]++
			metrics.Record(prCtx, runningPRs.M(float64(countMap[pipelineRunKey])))
			runningPipelineRuns++
			succeedCondition := pr.Status.GetCondition(apis.ConditionSucceeded)
			if succeedCondition != nil && succeedCondition.Status == corev1.ConditionUnknown {
				switch succeedCondition.Reason {
				case v1.TaskRunReasonResolvingTaskRef:
					trsWaitResolvingTaskRef++
				case v1.PipelineRunReasonResolvingPipelineRef.String():
					prsWaitResolvingPipelineRef++
				}
			}
		} else {
			// In case there are no running PipelineRuns for the pipelineRunKey, set the metric value to 0 to ensure
			// the metric is set for the key.
			if _, exists := countMap[pipelineRunKey]; !exists {
				countMap[pipelineRunKey] = 0
				metrics.Record(prCtx, runningPRs.M(0))
			}
		}
	}

	ctx, err := tag.New(context.Background())
	if err != nil {
		return err
	}
	metrics.Record(ctx, runningPRsWaitingOnPipelineResolution.M(float64(prsWaitResolvingPipelineRef)))
	metrics.Record(ctx, runningPRsWaitingOnTaskResolution.M(float64(trsWaitResolvingTaskRef)))
	metrics.Record(ctx, runningPRs.M(float64(runningPipelineRuns)))

	return nil
}
// ReportRunningPipelineRuns invokes RunningPipelineRuns on our configured PeriodSeconds
// until the context is cancelled.
func (r *Recorder) ReportRunningPipelineRuns(ctx context.Context, lister listers.PipelineRunLister) {
	logger := logging.FromContext(ctx)
	for {
		timer := time.NewTimer(r.ReportingPeriod)
		select {
		case <-ctx.Done():
			// Context cancelled: stop reporting, draining the timer if it
			// already fired so its channel doesn't leak a value.
			if !timer.Stop() {
				<-timer.C
			}
			return
		case <-timer.C:
			// Surface a metric for the number of running pipelines.
			if err := r.RunningPipelineRuns(lister); err != nil {
				logger.Warnf("Failed to log the metrics : %v", err)
			}
		}
	}
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"errors"
"fmt"
"log/slog"
"path"
"runtime"
"sync"
)
var (
	// errNotImplemented is wrapped into errors returned for OSes where CPU
	// variant detection is not implemented; match with errors.Is.
	errNotImplemented = errors.New("not implemented")
)
// Normalized OS/architecture identifiers used throughout this package.
const (
	Unknown = "unknown"
	Arm     = "arm"
	Arm64   = "arm64"
	Windows = "windows"
	Darwin  = "darwin"
	FreeBSD = "freebsd"
	Linux   = "linux"
)
// Platform describes the platform which the image in the manifest runs on.
type Platform struct {
	// Architecture field specifies the CPU architecture, for example
	// `amd64` or `ppc64le`.
	Architecture string `json:"architecture"`

	// OS specifies the operating system, for example `linux` or `windows`.
	OS string `json:"os"`

	// OSVersion is an optional field specifying the operating system
	// version, for example on Windows `10.0.14393.1066`.
	OSVersion string `json:"os.version,omitempty"`

	// OSFeatures is an optional field specifying an array of strings,
	// each listing a required OS feature (for example on Windows `win32k`).
	OSFeatures []string `json:"os.features,omitempty"`

	// Variant is an optional field specifying a variant of the CPU, for
	// example `v7` to specify ARMv7 when architecture is `arm`.
	Variant string `json:"variant,omitempty"`
}
// NewPlatform returns a Platform describing the current host: its OS,
// CPU architecture, and (for ARM hosts) the detected CPU variant.
func NewPlatform() *Platform {
	return &Platform{
		OS:           runtime.GOOS,
		Architecture: runtime.GOARCH,
		Variant:      cpuVariant(),
	}
}
// Format renders the platform as a slash-joined "os/arch[/variant]" string,
// or "unknown" when the OS is unset.
func (p *Platform) Format() string {
	if p.OS == "" {
		return Unknown
	}
	parts := []string{p.OS, p.Architecture, p.Variant}
	return path.Join(parts...)
}
// Present the ARM instruction set architecture, eg: v7, v8
// Don't use this value directly; call cpuVariant() instead.
var cpuVariantValue string

// cpuVariantOnce guards the one-time detection in cpuVariant().
var cpuVariantOnce sync.Once
// cpuVariant lazily detects (exactly once) the ARM CPU variant of the host.
// Non-ARM hosts yield the empty string; detection failures are logged and
// leave the value empty.
func cpuVariant() string {
	cpuVariantOnce.Do(func() {
		if !isArmArch(runtime.GOARCH) {
			return
		}
		v, err := getCPUVariant()
		if err != nil {
			slog.Error("failed to get CPU variant", "os", runtime.GOOS, "error", err)
		}
		cpuVariantValue = v
	})
	return cpuVariantValue
}
// isArmArch returns true if the architecture is ARM.
//
// The arch value should be normalized before being passed to this function.
func isArmArch(arch string) bool {
	return arch == Arm || arch == Arm64
}
// getCPUVariant reports the ARM variant ("v7", "v8", ...) for the host, or
// Unknown when it cannot be determined. OSes without detection support get
// an error wrapping errNotImplemented.
func getCPUVariant() (string, error) {
	switch runtime.GOOS {
	case Windows, Darwin:
		// Windows/Darwin only supports v7 for ARM32 and v8 for ARM64
		switch runtime.GOARCH {
		case Arm64:
			return "v8", nil
		case Arm:
			return "v7", nil
		default:
			return Unknown, nil
		}
	case Linux, FreeBSD:
		// FreeBSD supports ARMv6 and ARMv7 as well as ARMv4 and ARMv5 (though deprecated)
		// detecting those variants is currently unimplemented
		if runtime.GOARCH == Arm64 {
			return "v8", nil
		}
		return Unknown, nil
	default:
		return "", fmt.Errorf("getCPUVariant for OS %s: %w", runtime.GOOS, errNotImplemented)
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"errors"
"fmt"
"regexp"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/credentials/dockercreds"
"github.com/tektoncd/pipeline/pkg/credentials/gitcreds"
credmatcher "github.com/tektoncd/pipeline/pkg/credentials/matcher"
credwriter "github.com/tektoncd/pipeline/pkg/credentials/writer"
"github.com/tektoncd/pipeline/pkg/names"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
)
const (
	// credsInitHomeMountPrefix prefixes the emptyDir volumes that give each
	// step an isolated HOME for credential files.
	credsInitHomeMountPrefix = "tekton-creds-init-home" // #nosec
	// sshKnownHosts is the filename used for SSH known-hosts entries.
	sshKnownHosts = "known_hosts"
)
// dnsLabel1123Forbidden matches runs of characters not allowed in RFC 1123
// DNS labels (anything other than alphanumerics and '-').
var dnsLabel1123Forbidden = regexp.MustCompile("[^a-zA-Z0-9-]+")
// credsInit reads secrets available to the given service account and
// searches for annotations matching a specific format (documented in
// docs/auth.md). Matching secrets are turned into Volumes for the Pod
// and VolumeMounts to be given to each Step. Additionally, a list of
// entrypointer arguments are returned, each with a meaning specific to
// the credential type it describes: git credentials expect one set of
// args while docker credentials expect another.
//
// Any errors encountered during this process are returned to the
// caller. If no matching annotated secrets are found, nil lists with a
// nil error are returned.
func credsInit(ctx context.Context, obj runtime.Object, serviceAccountName, namespace string, kubeclient kubernetes.Interface) ([]string, []corev1.Volume, []corev1.VolumeMount, error) {
	logger := logging.FromContext(ctx)
	cfg := config.FromContextOrDefaults(ctx)
	// Respect the disable-creds-init feature flag: skip all creds handling.
	if cfg != nil && cfg.FeatureFlags != nil && cfg.FeatureFlags.DisableCredsInit {
		return nil, nil, nil, nil
	}
	// service account if not specified in pipeline/task spec, read it from the ConfigMap
	// and defaults to `default` if its missing from the ConfigMap as well
	if serviceAccountName == "" {
		serviceAccountName = config.DefaultServiceAccountValue
	}
	sa, err := kubeclient.CoreV1().ServiceAccounts(namespace).Get(ctx, serviceAccountName, metav1.GetOptions{})
	if err != nil {
		return nil, nil, nil, err
	}
	builders := []interface {
		credmatcher.Matcher
		credwriter.Writer
	}{dockercreds.NewBuilder(), gitcreds.NewBuilder()}
	var volumeMounts []corev1.VolumeMount
	var volumes []corev1.Volume
	var args []string
	var missingSecrets []string
	// Emit a single warning event listing every secret that could not be
	// retrieved, regardless of how this function returns.
	defer func() {
		recorder := controller.GetEventRecorder(ctx)
		if len(missingSecrets) > 0 && recorder != nil && obj != nil {
			recorder.Eventf(obj, corev1.EventTypeWarning, "FailedToRetrieveSecret",
				"Unable to retrieve some secrets (%s); attempting to use them may not succeed.",
				strings.Join(missingSecrets, ", "))
		}
	}()
	// Track duplicated secrets, prevent errors like this:
	// Pod "xxx" is invalid: spec.containers[0].volumeMounts[12].mountPath: Invalid value:
	// "/tekton/creds-secrets/demo-docker-credentials": must be unique
	visitedSecrets := make(map[string]struct{})
	for _, secretEntry := range sa.Secrets {
		if secretEntry.Name == "" {
			continue
		}
		if _, ok := visitedSecrets[secretEntry.Name]; ok {
			continue
		}
		visitedSecrets[secretEntry.Name] = struct{}{}
		secret, err := kubeclient.CoreV1().Secrets(namespace).Get(ctx, secretEntry.Name, metav1.GetOptions{})
		if k8serrors.IsNotFound(err) {
			// A missing secret is not fatal: record it for the deferred
			// warning event above and keep processing the rest.
			missingSecrets = append(missingSecrets, secretEntry.Name)
			logger.Warnf("Secret %q in ServiceAccount %s/%s not found, skipping", secretEntry.Name, namespace, serviceAccountName)
			continue
		}
		if err != nil {
			return nil, nil, nil, err
		}
		if err := checkGitSSHSecret(ctx, secret); err != nil {
			return nil, nil, nil, err
		}
		matched := false
		for _, b := range builders {
			// Renamed from `sa` to avoid shadowing the ServiceAccount above.
			if annotationArgs := b.MatchingAnnotations(secret); len(annotationArgs) > 0 {
				matched = true
				args = append(args, annotationArgs...)
			}
		}
		if matched {
			// While secret names can use RFC1123 DNS subdomain name rules, the volume mount
			// name required the stricter DNS label standard, for example no dots anymore.
			sanitizedName := dnsLabel1123Forbidden.ReplaceAllString(secret.Name, "-")
			name := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("tekton-internal-secret-volume-" + sanitizedName)
			volumeMounts = append(volumeMounts, corev1.VolumeMount{
				Name:      name,
				MountPath: credmatcher.VolumeName(secret.Name),
			})
			volumes = append(volumes, corev1.Volume{
				Name: name,
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: secret.Name,
					},
				},
			})
		}
	}
	if len(args) == 0 {
		// There are no creds to initialize.
		return nil, nil, nil, nil
	}
	return args, volumes, volumeMounts, nil
}
// getCredsInitVolume returns a Volume and VolumeMount for /tekton/creds. Each
// call produces a fresh pair; idx is appended to the volume name so multiple
// mounts in one Pod don't collide.
func getCredsInitVolume(ctx context.Context, idx int) (*corev1.Volume, *corev1.VolumeMount) {
	cfg := config.FromContextOrDefaults(ctx)
	if cfg != nil && cfg.FeatureFlags != nil && cfg.FeatureFlags.DisableCredsInit {
		// Creds init is disabled; no volume is needed.
		return nil, nil
	}
	volName := fmt.Sprintf("%s-%d", credsInitHomeMountPrefix, idx)
	return &corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory},
			},
		}, &corev1.VolumeMount{
			Name:      volName,
			MountPath: pipeline.CredsDir,
		}
}
// checkGitSSHSecret requires the `known_hosts` key to be present in a Git SSH
// Secret when the `require-git-ssh-secret-known-hosts` feature flag is true.
func checkGitSSHSecret(ctx context.Context, secret *corev1.Secret) error {
	cfg := config.FromContextOrDefaults(ctx)
	// Only SSH-auth secrets are checked, and only when the flag is on.
	if secret.Type != corev1.SecretTypeSSHAuth || !cfg.FeatureFlags.RequireGitSSHSecretKnownHosts {
		return nil
	}
	if _, found := secret.Data[sshKnownHosts]; found {
		return nil
	}
	return errors.New("TaskRun validation failed. Git SSH Secret must have \"known_hosts\" included " +
		"when feature flag \"require-git-ssh-secret-known-hosts\" is set to true")
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"path/filepath"
"strconv"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"gomodules.xyz/jsonpatch/v2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
)
const (
	// binVolumeName/binDir hold the injected entrypoint binary.
	binVolumeName    = "tekton-internal-bin"
	binDir           = "/tekton/bin"
	entrypointBinary = binDir + "/entrypoint"
	runVolumeName    = "tekton-internal-run"
	// RunDir is the directory that contains runtime variable data for TaskRuns.
	// This includes files for handling container ordering, exit status codes, and more.
	// See [https://github.com/tektoncd/pipeline/blob/main/docs/developers/taskruns.md#tekton]
	// for more details.
	RunDir = "/tekton/run"
	// Downward API volume/mount projecting pod annotations into files.
	downwardVolumeName = "tekton-internal-downward"
	downwardMountPoint = "/tekton/downward"
	terminationPath    = "/tekton/termination"
	// downwardMountReadyFile is the projected file whose content signals the
	// first step to start (see orderContainers' -wait_file_content flag).
	downwardMountReadyFile = "ready"
	readyAnnotation        = "tekton.dev/ready"
	readyAnnotationValue   = "READY"
	// Container name prefixes identifying step and sidecar containers.
	stepPrefix    = "step-"
	sidecarPrefix = "sidecar-"
	// downwardMountCancelFile is the projected file the entrypoint watches to
	// detect cancellation.
	downwardMountCancelFile = "cancel"
	cancelAnnotation        = "tekton.dev/cancel"
	cancelAnnotationValue   = "CANCEL"
)
var (
	// TODO(#1605): Generate volumeMount names, to avoid collisions.
	// binMount is the read-write mount of the entrypoint-binary volume.
	binMount = corev1.VolumeMount{
		Name:      binVolumeName,
		MountPath: binDir,
	}
	// binROMount is the read-only view of the same volume.
	binROMount = corev1.VolumeMount{
		Name:      binVolumeName,
		MountPath: binDir,
		ReadOnly:  true,
	}
	binVolume = corev1.Volume{
		Name:         binVolumeName,
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}
	internalStepsMount = corev1.VolumeMount{
		Name:      "tekton-internal-steps",
		MountPath: pipeline.StepsDir,
	}
	// downwardCancelVolumeItem projects the cancel annotation into a file so
	// the entrypoint can observe cancellation.
	downwardCancelVolumeItem = corev1.DownwardAPIVolumeFile{
		Path: downwardMountCancelFile,
		FieldRef: &corev1.ObjectFieldSelector{
			FieldPath: fmt.Sprintf("metadata.annotations['%s']", cancelAnnotation),
		},
	}
	// TODO(#1605): Signal sidecar readiness by injecting entrypoint,
	// remove dependency on Downward API.
	downwardVolume = corev1.Volume{
		Name: downwardVolumeName,
		VolumeSource: corev1.VolumeSource{
			DownwardAPI: &corev1.DownwardAPIVolumeSource{
				Items: []corev1.DownwardAPIVolumeFile{{
					Path: downwardMountReadyFile,
					FieldRef: &corev1.ObjectFieldSelector{
						FieldPath: fmt.Sprintf("metadata.annotations['%s']", readyAnnotation),
					},
				}},
			},
		},
	}
	downwardMount = corev1.VolumeMount{
		Name:      downwardVolumeName,
		MountPath: downwardMountPoint,
		// Marking this volume mount readonly is technically redundant,
		// since the volume itself is readonly, but including for completeness.
		ReadOnly: true,
	}
	// DownwardMountCancelFile is cancellation file mount to step, entrypoint will check this file to cancel the step.
	DownwardMountCancelFile = filepath.Join(downwardMountPoint, downwardMountCancelFile)
)
// orderContainers returns the specified steps, modified so that they are
// executed in order by overriding the entrypoint binary.
//
// Containers must have Command specified; if the user didn't specify a
// command, we must have fetched the image's ENTRYPOINT before calling this
// method, using entrypoint_lookup.go.
// Additionally, Step timeouts are added as entrypoint flag.
func orderContainers(ctx context.Context, commonExtraEntrypointArgs []string, steps []corev1.Container, taskSpec *v1.TaskSpec, breakpointConfig *v1.TaskRunDebug, waitForReadyAnnotation, enableKeepPodOnCancel bool) ([]corev1.Container, error) {
	if len(steps) == 0 {
		return nil, errors.New("no steps specified")
	}
	for i, s := range steps {
		argsForEntrypoint := []string{}
		idx := strconv.Itoa(i)
		if i == 0 {
			if waitForReadyAnnotation {
				argsForEntrypoint = append(argsForEntrypoint,
					// First step waits for the Downward volume file.
					"-wait_file", filepath.Join(downwardMountPoint, downwardMountReadyFile),
					"-wait_file_content", // Wait for file contents, not just an empty file.
				)
			}
		} else { // Not the first step - wait for previous step's "out" file.
			argsForEntrypoint = append(argsForEntrypoint, "-wait_file", filepath.Join(RunDir, strconv.Itoa(i-1), "out"))
		}
		argsForEntrypoint = append(argsForEntrypoint,
			// Start next step.
			"-post_file", filepath.Join(RunDir, idx, "out"),
			"-termination_path", terminationPath,
			"-step_metadata_dir", filepath.Join(RunDir, idx, "status"),
		)
		argsForEntrypoint = append(argsForEntrypoint, commonExtraEntrypointArgs...)
		if taskSpec != nil {
			if taskSpec.Steps != nil && len(taskSpec.Steps) >= i+1 {
				if taskSpec.Steps[i].OnError != "" {
					if taskSpec.Steps[i].OnError != v1.Continue && taskSpec.Steps[i].OnError != v1.StopAndFail {
						return nil, fmt.Errorf("task step onError must be either \"%s\" or \"%s\" but it is set to an invalid value \"%s\"",
							v1.Continue, v1.StopAndFail, taskSpec.Steps[i].OnError)
					}
					argsForEntrypoint = append(argsForEntrypoint, "-on_error", string(taskSpec.Steps[i].OnError))
				}
				if taskSpec.Steps[i].Timeout != nil {
					argsForEntrypoint = append(argsForEntrypoint, "-timeout", taskSpec.Steps[i].Timeout.Duration.String())
				}
				if taskSpec.Steps[i].StdoutConfig != nil {
					argsForEntrypoint = append(argsForEntrypoint, "-stdout_path", taskSpec.Steps[i].StdoutConfig.Path)
				}
				if taskSpec.Steps[i].StderrConfig != nil {
					argsForEntrypoint = append(argsForEntrypoint, "-stderr_path", taskSpec.Steps[i].StderrConfig.Path)
				}
				// add step results
				stepResultArgs := stepResultArgument(taskSpec.Steps[i].Results)
				argsForEntrypoint = append(argsForEntrypoint, stepResultArgs...)
				if len(taskSpec.Steps[i].When) > 0 {
					// marshal and pass to the entrypoint and unmarshal it there.
					marshal, err := json.Marshal(taskSpec.Steps[i].When)
					if err != nil {
						// BUGFIX: message previously read "faile to resolve when %w".
						return nil, fmt.Errorf("marshalling step when expressions: %w", err)
					}
					argsForEntrypoint = append(argsForEntrypoint, "--when_expressions", string(marshal))
				}
			}
			argsForEntrypoint = append(argsForEntrypoint, resultArgument(steps, taskSpec.Results)...)
		}
		if breakpointConfig != nil && breakpointConfig.NeedsDebugOnFailure() {
			argsForEntrypoint = append(argsForEntrypoint, "-breakpoint_on_failure")
		}
		if breakpointConfig != nil && breakpointConfig.NeedsDebugBeforeStep(s.Name) {
			argsForEntrypoint = append(argsForEntrypoint, "-debug_before_step")
		}
		cmd, args := s.Command, s.Args
		if len(cmd) > 0 {
			argsForEntrypoint = append(argsForEntrypoint, "-entrypoint", cmd[0])
		}
		if len(cmd) > 1 {
			// Remaining command words become leading args to the entrypoint.
			args = append(cmd[1:], args...)
		}
		argsForEntrypoint = append(argsForEntrypoint, "--")
		argsForEntrypoint = append(argsForEntrypoint, args...)
		steps[i].Command = []string{entrypointBinary}
		steps[i].Args = argsForEntrypoint
		steps[i].TerminationMessagePath = terminationPath
		if (i == 0 && waitForReadyAnnotation) || enableKeepPodOnCancel {
			// Mount the Downward volume into the first step container.
			// if enableKeepPodOnCancel is true, mount the Downward volume into all the steps.
			steps[i].VolumeMounts = append(steps[i].VolumeMounts, downwardMount)
		}
	}
	return steps, nil
}
// stepResultArgument creates the cli arguments for step results to the entrypointer.
// Returns nil when the step declares no results.
func stepResultArgument(stepResults []v1.StepResult) []string {
	if len(stepResults) == 0 {
		return nil
	}
	resultNames := make([]string, 0, len(stepResults))
	for _, result := range stepResults {
		resultNames = append(resultNames, result.Name)
	}
	return []string{"-step_results", strings.Join(resultNames, ",")}
}
// resultArgument creates the cli argument listing task result names for the
// entrypointer; nil when there are no results.
// NOTE(review): the steps parameter is unused here; kept to preserve the
// signature for existing callers.
func resultArgument(steps []corev1.Container, results []v1.TaskResult) []string {
	if len(results) > 0 {
		return []string{"-results", collectResultsName(results)}
	}
	return nil
}
// collectResultsName returns a comma-separated list of the names of all task
// results that do not carry a pre-set value.
func collectResultsName(results []v1.TaskResult) string {
	kept := make([]string, 0, len(results))
	for _, result := range results {
		if result.Value == nil {
			kept = append(kept, result.Name)
		}
	}
	return strings.Join(kept, ",")
}
// replaceReadyPatchBytes and replaceCancelPatchBytes hold pre-marshalled
// JSON Patch documents used by UpdateReady and CancelPod respectively.
var replaceReadyPatchBytes, replaceCancelPatchBytes []byte

func init() {
	// https://stackoverflow.com/questions/55573724/create-a-patch-to-add-a-kubernetes-annotation
	// JSON Patch paths escape '/' inside keys as '~1' (RFC 6902).
	readyAnnotationPath := "/metadata/annotations/" + strings.Replace(readyAnnotation, "/", "~1", 1)
	var err error
	replaceReadyPatchBytes, err = json.Marshal([]jsonpatch.JsonPatchOperation{{
		Operation: "replace",
		Path:      readyAnnotationPath,
		Value:     readyAnnotationValue,
	}})
	if err != nil {
		// Marshalling a static literal; a failure here is a programmer error.
		log.Fatalf("failed to marshal replace ready patch bytes: %v", err)
	}
	cancelAnnotationPath := "/metadata/annotations/" + strings.Replace(cancelAnnotation, "/", "~1", 1)
	replaceCancelPatchBytes, err = json.Marshal([]jsonpatch.JsonPatchOperation{{
		Operation: "replace",
		Path:      cancelAnnotationPath,
		Value:     cancelAnnotationValue,
	}})
	if err != nil {
		log.Fatalf("failed to marshal replace cancel patch bytes: %v", err)
	}
}
// buildSidecarStopPatch creates a JSON Patch to replace sidecar container
// images with the nop image. It returns nil (with a nil error) when no
// sidecar needs stopping.
// NOTE(review): ctx is conventionally the first parameter; left as-is here to
// keep the signature stable for existing callers.
func buildSidecarStopPatch(pod *corev1.Pod, nopImage string, ctx context.Context) ([]byte, error) {
	useSidecarLogs := config.FromContextOrDefaults(ctx).FeatureFlags.ResultExtractionMethod == config.ResultExtractionMethodSidecarLogs
	var ops []jsonpatch.JsonPatchOperation
	for _, status := range pod.Status.ContainerStatuses {
		// If results-from is set to sidecar logs, the reconciler injects a
		// sidecar named `sidecar-log-results`. Do not kill that sidecar;
		// let it exit gracefully.
		if useSidecarLogs && status.Name == pipeline.ReservedResultsSidecarContainerName {
			continue
		}
		// Stop any running container that isn't a step. An injected sidecar
		// might not carry the "sidecar-" prefix, so we can't rely on it.
		if IsContainerStep(status.Name) || status.State.Running == nil {
			continue
		}
		// Locate the matching container in the spec by name to get the
		// correct index for the patch path.
		for i := range pod.Spec.Containers {
			c := &pod.Spec.Containers[i]
			if c.Name == status.Name && c.Image != nopImage {
				ops = append(ops, jsonpatch.JsonPatchOperation{
					Operation: "replace",
					Path:      fmt.Sprintf("/spec/containers/%d/image", i),
					Value:     nopImage,
				})
				break
			}
		}
	}
	if len(ops) == 0 {
		return nil, nil
	}
	return json.Marshal(ops)
}
// CancelPod cancels the pod by PATCHing its annotations, replacing the cancel
// annotation with the "CANCEL" value so the entrypoint observes cancellation.
func CancelPod(ctx context.Context, kubeClient kubernetes.Interface, namespace, podName string) error {
	_, err := kubeClient.CoreV1().
		Pods(namespace).
		Patch(ctx, podName, types.JSONPatchType, replaceCancelPatchBytes, metav1.PatchOptions{})
	return err
}
// UpdateReady updates the Pod's annotations to signal the first step to start
// by projecting the ready annotation via the Downward API.
func UpdateReady(ctx context.Context, kubeclient kubernetes.Interface, pod corev1.Pod) error {
	if pod.Annotations[readyAnnotation] == readyAnnotationValue {
		// Already marked ready; skip the PATCH round-trip.
		return nil
	}
	// Replace the ready annotation with the "READY" value, signalling the
	// first step to start.
	_, err := kubeclient.CoreV1().
		Pods(pod.Namespace).
		Patch(ctx, pod.Name, types.JSONPatchType, replaceReadyPatchBytes, metav1.PatchOptions{})
	return err
}
// StopSidecars updates sidecar containers in the Pod to a nop image, which
// exits successfully immediately. It returns the (possibly patched) Pod.
func StopSidecars(ctx context.Context, nopImage string, kubeclient kubernetes.Interface, namespace, name string) (*corev1.Pod, error) {
	pod, err := kubeclient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
	switch {
	case k8serrors.IsNotFound(err):
		// Return NotFound as-is, since the K8s error checks don't handle wrapping.
		return nil, err
	case err != nil:
		return nil, fmt.Errorf("error getting Pod %q when stopping sidecars: %w", name, err)
	}
	// Sidecars can only be stopped while the pod is running.
	if pod.Status.Phase != corev1.PodRunning {
		return pod, nil
	}
	// Build JSON Patch operations replacing sidecar images with the nop image.
	patchBytes, err := buildSidecarStopPatch(pod, nopImage, ctx)
	if err != nil {
		return nil, fmt.Errorf("error building patch for stopping sidecars of Pod %q: %w", name, err)
	}
	if patchBytes == nil {
		// Nothing to stop.
		return pod, nil
	}
	// PATCH the container images, same pattern as UpdateReady and CancelPod.
	patched, err := kubeclient.CoreV1().Pods(namespace).Patch(ctx, name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
	if err != nil {
		return nil, fmt.Errorf("error stopping sidecars of Pod %q: %w", name, err)
	}
	return patched, nil
}
// IsSidecarStatusRunning determines if any SidecarStatus on a TaskRun
// has not yet terminated.
func IsSidecarStatusRunning(tr *v1.TaskRun) bool {
	for i := range tr.Status.Sidecars {
		if tr.Status.Sidecars[i].Terminated == nil {
			return true
		}
	}
	return false
}
// IsContainerStep returns true if the container name indicates that it
// represents a step (i.e. it carries the "step-" prefix).
func IsContainerStep(name string) bool {
	return strings.HasPrefix(name, stepPrefix)
}
// IsContainerSidecar returns true if the container name indicates that it
// represents a sidecar (i.e. it carries the "sidecar-" prefix).
func IsContainerSidecar(name string) bool {
	return strings.HasPrefix(name, sidecarPrefix)
}
// TrimStepPrefix returns the container name, stripped of its step prefix.
func TrimStepPrefix(name string) string {
	return strings.TrimPrefix(name, stepPrefix)
}
// TrimSidecarPrefix returns the container name, stripped of its sidecar
// prefix.
func TrimSidecarPrefix(name string) string {
	return strings.TrimPrefix(name, sidecarPrefix)
}
// StepName returns the step name after adding "step-" prefix to the actual step name or
// returns "step-unnamed-<step-index>" if not specified
func StepName(name string, i int) string {
if name != "" {
return GetContainerName(name)
}
return fmt.Sprintf("%sunnamed-%d", stepPrefix, i)
}
// GetContainerName prefixes the input name with "step-"
func GetContainerName(name string) string {
return fmt.Sprintf("%s%s", stepPrefix, name)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"encoding/json"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
corev1 "k8s.io/api/core/v1"
)
// EntrypointCache looks up an image's entrypoint (command) in a container
// image registry, possibly using the given service account's credentials.
type EntrypointCache interface {
	// get returns the imageData for the given image reference. If the value
	// is not found in the cache, it will be fetched from the image registry,
	// possibly using K8s service account imagePullSecrets.
	//
	// It also returns the digest associated with the given reference. If
	// the reference referred to an index, the returned digest will be the
	// index's digest, not any platform-specific image contained by the
	// index.
	get(ctx context.Context, ref name.Reference, namespace, serviceAccountName string, imagePullSecrets []corev1.LocalObjectReference, hasArgs bool) (*imageData, error)
}
// imageData contains information looked up about an image or multi-platform image index.
type imageData struct {
	digest   v1.Hash             // digest of the fetched image or index
	commands map[string][]string // map of platform -> []command
}
// resolveEntrypoints looks up container image ENTRYPOINTs for all steps that
// don't specify a Command.
//
// Images that are not specified by digest will be specified by digest after
// lookup in the resulting list of containers.
func resolveEntrypoints(ctx context.Context, cache EntrypointCache, namespace, serviceAccountName string, imagePullSecrets []corev1.LocalObjectReference, steps []corev1.Container) ([]corev1.Container, error) {
	// Local name->imageData cache scoped to this set of steps only. If the
	// image tag is re-pushed before the next run we must resolve again, but
	// within one TaskRun repeated lookups can be skipped.
	seen := map[name.Reference]imageData{}
	for i := range steps {
		step := &steps[i]
		// Nothing to resolve when the command is already specified.
		if len(step.Command) > 0 {
			continue
		}
		ref, err := name.ParseReference(step.Image, name.WeakValidation)
		if err != nil {
			return nil, err
		}
		id, found := seen[ref]
		if !found {
			// Look it up for real, possibly hitting the registry.
			resolved, err := cache.get(ctx, ref, namespace, serviceAccountName, imagePullSecrets, len(step.Args) > 0)
			if err != nil {
				return nil, err
			}
			id = *resolved
			// Remember it in case another step in this task uses the same image.
			seen[ref] = id
		}
		// Pin the original reference to a reference by digest.
		step.Image = ref.Context().Digest(id.digest.String()).String()
		// Pass the platform->command map to the entrypoint as JSON via env var.
		encoded, err := json.Marshal(id.commands)
		if err != nil {
			return nil, err
		}
		step.Env = append(step.Env, corev1.EnvVar{
			Name:  "TEKTON_PLATFORM_COMMANDS",
			Value: string(encoded),
		})
	}
	return steps, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"errors"
"fmt"
"github.com/google/go-containerregistry/pkg/authn/k8schain"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
lru "github.com/hashicorp/golang-lru"
"github.com/tektoncd/pipeline/pkg/platforms"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
)
// cacheSize bounds the number of entries kept in the entrypoint LRU cache.
const cacheSize = 1024

// entrypointCache is the registry-backed EntrypointCache implementation.
type entrypointCache struct {
	kubeclient kubernetes.Interface
	lru        *lru.Cache // cache of digest->map[string][]string commands
}
// NewEntrypointCache returns a new entrypoint cache implementation that uses
// K8s credentials to pull image metadata from a container image registry.
func NewEntrypointCache(kubeclient kubernetes.Interface) (EntrypointCache, error) {
	// Named `cache` (not `lru`) so the local no longer shadows the lru package.
	cache, err := lru.New(cacheSize)
	if err != nil {
		return nil, err
	}
	return &entrypointCache{
		kubeclient: kubeclient,
		lru:        cache,
	}, nil
}
// Get gets the image from the cache for the given ref, namespace, and SA.
//
// It also returns the digest associated with the given reference. If the
// reference referred to an index, the returned digest will be the index's
// digest, not any platform-specific image contained by the index.
func (e *entrypointCache) get(ctx context.Context, ref name.Reference, namespace, serviceAccountName string, imagePullSecrets []corev1.LocalObjectReference, hasArgs bool) (*imageData, error) {
	// If image is specified by digest, check the cache first: a digest-pinned
	// ref can be served without a registry round-trip.
	if digest, ok := ref.(name.Digest); ok {
		if id, ok := e.lru.Get(digest.String()); ok {
			return id.(*imageData), nil
		}
	}
	pullSecretsNames := make([]string, 0, len(imagePullSecrets))
	for _, ps := range imagePullSecrets {
		pullSecretsNames = append(pullSecretsNames, ps.Name)
	}
	// Consult the remote registry, using imagePullSecrets.
	kc, err := k8schain.New(ctx, e.kubeclient, k8schain.Options{
		Namespace:          namespace,
		ServiceAccountName: serviceAccountName,
		ImagePullSecrets:   pullSecretsNames,
	})
	if err != nil {
		return nil, fmt.Errorf("error creating k8schain: %w", err)
	}
	desc, err := remote.Get(ref, remote.WithAuthFromKeychain(kc))
	if err != nil {
		return nil, err
	}
	// Check the cache for this ref@digest, in case we've seen it before.
	// This saves looking up each constituent image's commands if we've seen
	// the multi-platform image before.
	refByDigest := ref.Context().Digest(desc.Digest.String()).String()
	if id, ok := e.lru.Get(refByDigest); ok {
		return id.(*imageData), nil
	}
	id := &imageData{
		digest:   desc.Digest,
		commands: map[string][]string{},
	}
	switch {
	case desc.MediaType.IsImage():
		// Single-platform image: record one platform->command entry.
		img, err := desc.Image()
		if err != nil {
			return nil, err
		}
		ep, plat, err := imageInfo(img, hasArgs)
		if err != nil {
			return nil, err
		}
		id.commands[plat] = ep
	case desc.MediaType.IsIndex():
		// Multi-platform index: collect one entry per constituent platform.
		idx, err := desc.ImageIndex()
		if err != nil {
			return nil, err
		}
		id.commands, err = buildCommandMap(idx, hasArgs)
		if err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("unsupported media type for image reference")
	}
	// Cache the digest->commands for future lookup.
	e.lru.Add(refByDigest, id)
	return id, nil
}
// buildCommandMap extracts the per-platform entrypoint commands for every
// manifest in the given image index.
func buildCommandMap(idx v1.ImageIndex, hasArgs bool) (map[string][]string, error) {
	mf, err := idx.IndexManifest()
	if err != nil {
		return nil, err
	}
	// Map platform strings to digest, to handle some ~malformed images
	// that specify the same manifest multiple times.
	seenDigest := map[string]v1.Hash{}
	cmds := map[string][]string{}
	for _, desc := range mf.Manifests {
		plat := desc.Platform.String()
		// Skip unknown platforms; Docker uses these to store attestation data:
		// https://docs.docker.com/build/attestations/attestation-storage/#examples
		if plat == "unknown/unknown" {
			continue
		}
		if prev, dup := seenDigest[plat]; dup && prev != desc.Digest {
			return nil, fmt.Errorf("duplicate unique image found for platform: %s: found %s and %s", plat, prev, desc.Digest)
		}
		seenDigest[plat] = desc.Digest
		img, err := idx.Image(desc.Digest)
		if err != nil {
			return nil, err
		}
		if cmds[plat], _, err = imageInfo(img, hasArgs); err != nil {
			return nil, err
		}
	}
	return cmds, nil
}
// imageInfo returns the effective command of the image (ENTRYPOINT, falling
// back to or combined with CMD) and the platform string it was built for.
func imageInfo(img v1.Image, hasArgs bool) (cmd []string, platform string, err error) {
	cf, err := img.ConfigFile()
	if err != nil {
		return nil, "", err
	}
	entry := cf.Config.Entrypoint
	switch {
	case len(entry) == 0:
		// No ENTRYPOINT: fall back to CMD.
		entry = cf.Config.Cmd
	case !hasArgs:
		// ENTRYPOINT present and the step passes no args: append CMD.
		entry = append(entry, cf.Config.Cmd...)
	}
	plat := platforms.NewPlatform()
	plat.OS = cf.OS
	plat.Architecture = cf.Architecture
	// A single image's config metadata doesn't include the CPU
	// architecture variant, but we'll assume this is okay since
	// the runtime node's image selection will also select the same
	// image. This will only be a problem if the image is a
	// single-platform image that happens to specify a variant, and
	// the runtime node it gets assigned to has a value for
	// runtime.GOARM.
	return entry, plat.Format(), nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"encoding/json"
"fmt"
"log"
"math"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/internal/computeresources/tasklevel"
"github.com/tektoncd/pipeline/pkg/names"
tknreconciler "github.com/tektoncd/pipeline/pkg/reconciler"
"github.com/tektoncd/pipeline/pkg/spire"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/changeset"
"knative.dev/pkg/kmap"
"knative.dev/pkg/kmeta"
)
const (
	// TektonHermeticEnvVar is the env var we set in containers to indicate they should be run hermetically
	TektonHermeticEnvVar = "TEKTON_HERMETIC"
	// ExecutionModeAnnotation is an experimental optional annotation to set the execution mode on a TaskRun
	ExecutionModeAnnotation = "experimental.tekton.dev/execution-mode"
	// ExecutionModeHermetic indicates hermetic execution mode
	ExecutionModeHermetic = "hermetic"
	// deadlineFactor is the factor we multiply the taskrun timeout with to determine the activeDeadlineSeconds of the Pod.
	// It has to be higher than the timeout (to not be killed before)
	deadlineFactor = 1.5
	// SpiffeCsiDriver is the CSI storage plugin needed for injection of SPIFFE workload api.
	SpiffeCsiDriver = "csi.spiffe.io"
	// OsSelectorLabel is the label Kubernetes uses for OS-specific workloads (https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-os)
	OsSelectorLabel = "kubernetes.io/os"
	// TerminationReasonTimeoutExceeded indicates a step execution timed out.
	TerminationReasonTimeoutExceeded = "TimeoutExceeded"
	// TerminationReasonSkipped indicates a step execution was skipped due to previous step failed.
	TerminationReasonSkipped = "Skipped"
	// TerminationReasonContinued indicates a step errored but was ignored since onError was set to continue.
	TerminationReasonContinued = "Continued"
	// TerminationReasonCancelled indicates a step was cancelled.
	TerminationReasonCancelled = "Cancelled"
	// StepArtifactPathPattern is the reference pattern for a step's artifact path.
	StepArtifactPathPattern = "step.artifacts.path"
	// SidecarK8sMinorVersionCheck is the K8s minor version used to determine
	// whether to use native k8s sidecars or Tekton-managed sidecars.
	SidecarK8sMinorVersionCheck = 29
)
// These are effectively const, but Go doesn't have such an annotation.
var (
	// ReleaseAnnotation is the annotation key recording the pipeline release.
	ReleaseAnnotation = "pipeline.tekton.dev/release"

	groupVersionKind = schema.GroupVersionKind{
		Group:   v1.SchemeGroupVersion.Group,
		Version: v1.SchemeGroupVersion.Version,
		Kind:    "TaskRun",
	}
	// These are injected into all of the source/step containers.
	implicitVolumeMounts = []corev1.VolumeMount{{
		Name:      "tekton-internal-workspace",
		MountPath: pipeline.WorkspaceDir,
	}, {
		Name:      "tekton-internal-home",
		MountPath: pipeline.HomeDir,
	}, {
		Name:      "tekton-internal-results",
		MountPath: pipeline.DefaultResultPath,
	}, {
		Name:      "tekton-internal-steps",
		MountPath: pipeline.StepsDir,
		ReadOnly:  true,
	}, {
		Name:      "tekton-internal-artifacts",
		MountPath: pipeline.ArtifactsDir,
	}}
	// implicitVolumes backs each implicit mount above with an EmptyDir.
	implicitVolumes = []corev1.Volume{{
		Name:         "tekton-internal-workspace",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}, {
		Name:         "tekton-internal-home",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}, {
		Name:         "tekton-internal-results",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}, {
		Name:         "tekton-internal-steps",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}, {
		Name:         "tekton-internal-artifacts",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}}
	// MaxActiveDeadlineSeconds is a maximum permitted value to be used for a task with no timeout
	MaxActiveDeadlineSeconds = int64(math.MaxInt32)
)
// Builder exposes options to configure Pod construction from TaskSpecs/Runs.
type Builder struct {
	// Images holds the Tekton-injected container image references
	// (entrypoint, shell, working-dir init, sidecar-log-results, ...).
	Images pipeline.Images
	// KubeClient talks to the cluster; used for credential init and for
	// discovering the server version (native sidecar support check).
	KubeClient kubernetes.Interface
	// EntrypointCache resolves and caches image entrypoints for steps
	// that do not specify a command.
	EntrypointCache EntrypointCache
}
// Transformer is a function that will transform a Pod. This can be used to mutate
// a Pod generated by Tekton after it got generated.
// Transformers run last in Build; each receives the Pod built so far and
// returns the (possibly mutated) Pod, or an error which aborts the build.
type Transformer func(*corev1.Pod) (*corev1.Pod, error)
// Build creates a Pod using the configuration options set on b and the TaskRun
// and TaskSpec provided in its arguments. An error is returned if there are
// any problems during the conversion.
func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.TaskSpec, transformers ...Transformer) (*corev1.Pod, error) {
	var (
		scriptsInit                                       *corev1.Container
		initContainers, stepContainers, sidecarContainers []corev1.Container
		volumes                                           []corev1.Volume
	)
	volumeMounts := []corev1.VolumeMount{binROMount}
	implicitEnvVars := []corev1.EnvVar{}

	// Resolve feature flags and defaults from the context once up front.
	featureFlags := config.FromContextOrDefaults(ctx).FeatureFlags
	defaultForbiddenEnv := config.FromContextOrDefaults(ctx).Defaults.DefaultForbiddenEnv
	alphaAPIEnabled := featureFlags.EnableAPIFields == config.AlphaAPIFields
	sidecarLogsResultsEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.ResultExtractionMethod == config.ResultExtractionMethodSidecarLogs
	enableKeepPodOnCancel := featureFlags.EnableKeepPodOnCancel
	setSecurityContext := config.FromContextOrDefaults(ctx).FeatureFlags.SetSecurityContext
	setSecurityContextReadOnlyRootFilesystem := config.FromContextOrDefaults(ctx).FeatureFlags.SetSecurityContextReadOnlyRootFilesystem
	defaultManagedByLabelValue := config.FromContextOrDefaults(ctx).Defaults.DefaultManagedByLabelValue

	// Add our implicit volumes first, so they can be overridden by the user if they prefer.
	volumes = append(volumes, implicitVolumes...)
	volumeMounts = append(volumeMounts, implicitVolumeMounts...)

	// Create Volumes and VolumeMounts for any credentials found in annotated
	// Secrets, along with any arguments needed by Step entrypoints to process
	// those secrets.
	commonExtraEntrypointArgs := []string{}
	// Entrypoint arg to enable or disable spire
	if config.IsSpireEnabled(ctx) {
		commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-enable_spire")
	}
	credEntrypointArgs, credVolumes, credVolumeMounts, err := credsInit(ctx, taskRun, taskRun.Spec.ServiceAccountName, taskRun.Namespace, b.KubeClient)
	if err != nil {
		return nil, err
	}
	commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, credEntrypointArgs...)
	volumes = append(volumes, credVolumes...)
	volumeMounts = append(volumeMounts, credVolumeMounts...)

	// Merge step template with steps.
	// TODO(#1605): Move MergeSteps to pkg/pod
	steps, err := v1.MergeStepsWithStepTemplate(taskSpec.StepTemplate, taskSpec.Steps)
	if err != nil {
		return nil, err
	}
	// Apply per-step overrides from the TaskRun spec.
	steps, err = v1.MergeStepsWithSpecs(steps, taskRun.Spec.StepSpecs)
	if err != nil {
		return nil, err
	}
	// Task-level compute resources, if set, override step-level ones.
	if taskRun.Spec.ComputeResources != nil {
		tasklevel.ApplyTaskLevelComputeResources(steps, taskRun.Spec.ComputeResources)
	}
	securityContextConfig := SecurityContextConfig{
		SetSecurityContext:        setSecurityContext,
		SetReadOnlyRootFilesystem: setSecurityContextReadOnlyRootFilesystem,
	}
	windows := usesWindows(taskRun)
	pollingInterval := config.FromContextOrDefaults(ctx).Defaults.DefaultSidecarLogPollingInterval
	// When extracting results via sidecar logs, inject the reserved results
	// sidecar and tell the entrypoint to report results through it.
	if sidecarLogsResultsEnabled {
		if taskSpec.Results != nil || artifactsPathReferenced(steps) {
			// create a results sidecar
			resultsSidecar, err := createResultsSidecar(taskSpec, b.Images.SidecarLogResultsImage, securityContextConfig, windows, pollingInterval)
			if err != nil {
				return nil, err
			}
			taskSpec.Sidecars = append(taskSpec.Sidecars, resultsSidecar)
			commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-result_from", config.ResultExtractionMethodSidecarLogs)
		}
	}
	sidecars, err := v1.MergeSidecarsWithSpecs(taskSpec.Sidecars, taskRun.Spec.SidecarSpecs)
	if err != nil {
		return nil, err
	}

	// The "prepare" init container copies the entrypoint binary into place.
	initContainers = []corev1.Container{
		entrypointInitContainer(b.Images.EntrypointImage, steps, securityContextConfig, windows),
	}

	// Convert any steps with Script to command+args.
	// If any are found, append an init container to initialize scripts.
	if alphaAPIEnabled {
		scriptsInit, stepContainers, sidecarContainers = convertScripts(b.Images.ShellImage, b.Images.ShellImageWin, steps, sidecars, taskRun.Spec.Debug, securityContextConfig)
	} else {
		// Debug (and the windows shell image) are alpha-only features.
		scriptsInit, stepContainers, sidecarContainers = convertScripts(b.Images.ShellImage, "", steps, sidecars, nil, securityContextConfig)
	}
	if scriptsInit != nil {
		initContainers = append(initContainers, *scriptsInit)
		volumes = append(volumes, scriptsVolume)
	}
	if alphaAPIEnabled && taskRun.Spec.Debug != nil && taskRun.Spec.Debug.NeedsDebug() {
		volumes = append(volumes, debugScriptsVolume, debugInfoVolume)
	}

	// Initialize any workingDirs under /workspace.
	if workingDirInit := workingDirInit(b.Images.WorkingDirInitImage, stepContainers, securityContextConfig, windows); workingDirInit != nil {
		initContainers = append(initContainers, *workingDirInit)
	}

	// By default, use an empty pod template and take the one defined in the task run spec if any
	podTemplate := pod.Template{}
	if taskRun.Spec.PodTemplate != nil {
		podTemplate = *taskRun.Spec.PodTemplate
	}

	// Resolve entrypoint for any steps that don't specify command.
	stepContainers, err = resolveEntrypoints(ctx, b.EntrypointCache, taskRun.Namespace, taskRun.Spec.ServiceAccountName, podTemplate.ImagePullSecrets, stepContainers)
	if err != nil {
		return nil, err
	}

	// Rewrite step commands to run through the entrypoint binary, which
	// sequences steps via /tekton/run and (optionally) the downward API.
	readyImmediately := isPodReadyImmediately(*featureFlags, taskSpec.Sidecars)
	if alphaAPIEnabled {
		stepContainers, err = orderContainers(ctx, commonExtraEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately, enableKeepPodOnCancel)
	} else {
		stepContainers, err = orderContainers(ctx, commonExtraEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately, enableKeepPodOnCancel)
	}
	if err != nil {
		return nil, err
	}
	volumes = append(volumes, binVolume)
	// The downward volume surfaces the "ready" annotation (and, with
	// keep-pod-on-cancel, the cancel signal) to the waiting entrypoint.
	if !readyImmediately || enableKeepPodOnCancel {
		downwardVolumeDup := downwardVolume.DeepCopy()
		if enableKeepPodOnCancel {
			downwardVolumeDup.VolumeSource.DownwardAPI.Items = append(downwardVolumeDup.VolumeSource.DownwardAPI.Items, downwardCancelVolumeItem)
		}
		volumes = append(volumes, *downwardVolumeDup)
	}

	// Order of precedence for envs
	// implicit env vars
	// Superseded by step env vars
	// Superseded by config-default default-pod-template envs
	// Superseded by podTemplate envs
	if len(implicitEnvVars) > 0 {
		for i, s := range stepContainers {
			env := append(implicitEnvVars, s.Env...) //nolint:gocritic
			stepContainers[i].Env = env
		}
	}
	// Drop pod-template envs whose names are in the configured forbidden list.
	filteredEnvs := []corev1.EnvVar{}
	for _, e := range podTemplate.Env {
		if !slices.Contains(defaultForbiddenEnv, e.Name) {
			filteredEnvs = append(filteredEnvs, e)
		}
	}
	if len(podTemplate.Env) > 0 {
		for i, s := range stepContainers {
			env := append(s.Env, filteredEnvs...) //nolint:gocritic
			stepContainers[i].Env = env
		}
	}
	// Add env var if hermetic execution was requested & if the alpha API is enabled
	if taskRun.Annotations[ExecutionModeAnnotation] == ExecutionModeHermetic && alphaAPIEnabled {
		for i, s := range stepContainers {
			// Add it at the end so it overrides
			env := append(s.Env, corev1.EnvVar{Name: TektonHermeticEnvVar, Value: "1"}) //nolint:gocritic
			stepContainers[i].Env = env
		}
	}

	// Add implicit volume mounts to each step, unless the step specifies
	// its own volume mount at that path.
	for i, s := range stepContainers {
		// Mount /tekton/creds with a fresh volume for each Step. It needs to
		// be world-writeable and empty so creds can be initialized in there. Can't
		// guarantee what UID container runs with. If legacy credential helper (creds-init)
		// is disabled via feature flag then these can be nil since we don't want to mount
		// the automatic credential volume.
		v, vm := getCredsInitVolume(ctx, i)
		if v != nil && vm != nil {
			volumes = append(volumes, *v)
			s.VolumeMounts = append(s.VolumeMounts, *vm)
		}
		// Add /tekton/run state volumes.
		// Each step should only mount their own volume as RW,
		// all other steps should be mounted RO.
		volumes = append(volumes, runVolume(i))
		for j := range stepContainers {
			s.VolumeMounts = append(s.VolumeMounts, runMount(j, i != j))
		}
		// Only add implicit mounts whose (cleaned) path the step hasn't
		// already claimed with its own mount.
		requestedVolumeMounts := map[string]bool{}
		for _, vm := range s.VolumeMounts {
			requestedVolumeMounts[filepath.Clean(vm.MountPath)] = true
		}
		var toAdd []corev1.VolumeMount
		for _, imp := range volumeMounts {
			if !requestedVolumeMounts[filepath.Clean(imp.MountPath)] {
				toAdd = append(toAdd, imp)
			}
		}
		vms := append(s.VolumeMounts, toAdd...) //nolint:gocritic
		stepContainers[i].VolumeMounts = vms
	}

	if sidecarLogsResultsEnabled {
		// Mount implicit volumes onto sidecarContainers
		// so that they can access /tekton/results and /tekton/run.
		if taskSpec.Results != nil || artifactsPathReferenced(steps) {
			for i, s := range sidecarContainers {
				// Only the reserved results sidecar gets these mounts.
				if s.Name != pipeline.ReservedResultsSidecarName {
					continue
				}
				for j := range stepContainers {
					s.VolumeMounts = append(s.VolumeMounts, runMount(j, true))
				}
				requestedVolumeMounts := map[string]bool{}
				for _, vm := range s.VolumeMounts {
					requestedVolumeMounts[filepath.Clean(vm.MountPath)] = true
				}
				var toAdd []corev1.VolumeMount
				for _, imp := range volumeMounts {
					if !requestedVolumeMounts[filepath.Clean(imp.MountPath)] {
						toAdd = append(toAdd, imp)
					}
				}
				vms := append(s.VolumeMounts, toAdd...) //nolint:gocritic
				sidecarContainers[i].VolumeMounts = vms
			}
		}
	}

	// This loop:
	// - sets container name to add "step-" prefix or "step-unnamed-#" if not specified.
	// TODO(#1605): Remove this loop and make each transformation in
	// isolation.
	for i, s := range stepContainers {
		stepContainers[i].Name = names.SimpleNameGenerator.RestrictLength(StepName(s.Name, i))
	}

	// Add podTemplate Volumes to the explicitly declared use volumes
	volumes = append(volumes, taskSpec.Volumes...)
	volumes = append(volumes, podTemplate.Volumes...)
	if err := v1.ValidateVolumes(volumes); err != nil {
		return nil, err
	}
	readonly := true
	if config.IsSpireEnabled(ctx) {
		// add SPIRE's CSI volume to the explicitly declared use volumes
		volumes = append(volumes, corev1.Volume{
			Name: spire.WorkloadAPI,
			VolumeSource: corev1.VolumeSource{
				CSI: &corev1.CSIVolumeSource{
					Driver:   SpiffeCsiDriver,
					ReadOnly: &readonly,
				},
			},
		})
		// mount SPIRE's CSI volume to each Step Container
		for i := range stepContainers {
			c := &stepContainers[i]
			c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
				Name:      spire.WorkloadAPI,
				MountPath: spire.VolumeMountPath,
				ReadOnly:  readonly,
			})
		}
		for i := range initContainers {
			// mount SPIRE's CSI volume to each Init Container
			c := &initContainers[i]
			c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
				Name:      spire.WorkloadAPI,
				MountPath: spire.VolumeMountPath,
				ReadOnly:  readonly,
			})
		}
	}

	mergedPodContainers := stepContainers
	mergedPodInitContainers := initContainers
	useTektonSidecar := true
	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableKubernetesSidecar {
		// Go through the logic for enable-kubernetes feature flag
		// Kubernetes Version
		dc := b.KubeClient.Discovery()
		sv, err := dc.ServerVersion()
		if err != nil {
			return nil, err
		}
		if IsNativeSidecarSupport(sv) {
			// Add RestartPolicy and Merge into initContainer
			useTektonSidecar = false
			for i := range sidecarContainers {
				sc := &sidecarContainers[i]
				always := corev1.ContainerRestartPolicyAlways
				sc.RestartPolicy = &always
				// For the results sidecar specifically, ensure it has the kubernetes-sidecar-mode flag
				// to prevent it from exiting and restarting
				if sc.Name == pipeline.ReservedResultsSidecarName {
					// NOTE(review): only Command is scanned here; a flag passed
					// via Args would not be detected and the flag would be
					// appended again — confirm that's intended.
					kubernetesSidecarModeFound := false
					for j, arg := range sc.Command {
						if arg == "-kubernetes-sidecar-mode" && j+1 < len(sc.Command) {
							kubernetesSidecarModeFound = true
							break
						}
					}
					if !kubernetesSidecarModeFound {
						sc.Command = append(sc.Command, "-kubernetes-sidecar-mode", "true")
					}
				}
				sc.Name = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", sidecarPrefix, sc.Name))
				mergedPodInitContainers = append(mergedPodInitContainers, *sc)
			}
		}
	}
	if useTektonSidecar {
		// Merge sidecar containers with step containers.
		for _, sc := range sidecarContainers {
			sc.Name = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", sidecarPrefix, sc.Name))
			mergedPodContainers = append(mergedPodContainers, sc)
		}
	}

	var dnsPolicy corev1.DNSPolicy
	if podTemplate.DNSPolicy != nil {
		dnsPolicy = *podTemplate.DNSPolicy
	}
	var priorityClassName string
	if podTemplate.PriorityClassName != nil {
		priorityClassName = *podTemplate.PriorityClassName
	}

	// Propagate TaskRun annotations (minus managed-by) and record the release.
	podAnnotations := kmap.ExcludeKeys(kmeta.CopyMap(taskRun.Annotations), tknreconciler.KubernetesManagedByAnnotationKey)
	podAnnotations[ReleaseAnnotation] = changeset.Get()
	if readyImmediately {
		podAnnotations[readyAnnotation] = readyAnnotationValue
	}

	// calculate the activeDeadlineSeconds based on the specified timeout (uses default timeout if it's not specified)
	activeDeadlineSeconds := int64(taskRun.GetTimeout(ctx).Seconds() * deadlineFactor)
	// set activeDeadlineSeconds to the max. allowed value i.e. max int32 when timeout is explicitly set to 0
	if taskRun.GetTimeout(ctx) == config.NoTimeoutDuration {
		activeDeadlineSeconds = MaxActiveDeadlineSeconds
	}

	podNameSuffix := "-pod"
	if taskRunRetries := len(taskRun.Status.RetriesStatus); taskRunRetries > 0 {
		podNameSuffix = fmt.Sprintf("%s-retry%d", podNameSuffix, taskRunRetries)
	}
	newPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			// We execute the build's pod in the same namespace as where the build was
			// created so that it can access colocated resources.
			Namespace: taskRun.Namespace,
			// Generate a unique name based on the build's name.
			// The name is univocally generated so that in case of
			// stale informer cache, we never create duplicate Pods
			Name: kmeta.ChildName(taskRun.Name, podNameSuffix),
			// If our parent TaskRun is deleted, then we should be as well.
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(taskRun, groupVersionKind),
			},
			Annotations: podAnnotations,
			Labels:      makeLabels(taskRun, defaultManagedByLabelValue),
		},
		Spec: corev1.PodSpec{
			RestartPolicy:                corev1.RestartPolicyNever,
			InitContainers:               mergedPodInitContainers,
			Containers:                   mergedPodContainers,
			ServiceAccountName:           taskRun.Spec.ServiceAccountName,
			Volumes:                      volumes,
			NodeSelector:                 podTemplate.NodeSelector,
			Tolerations:                  podTemplate.Tolerations,
			Affinity:                     podTemplate.Affinity,
			SecurityContext:              podTemplate.SecurityContext,
			RuntimeClassName:             podTemplate.RuntimeClassName,
			AutomountServiceAccountToken: podTemplate.AutomountServiceAccountToken,
			SchedulerName:                podTemplate.SchedulerName,
			HostNetwork:                  podTemplate.HostNetwork,
			DNSPolicy:                    dnsPolicy,
			DNSConfig:                    podTemplate.DNSConfig,
			EnableServiceLinks:           podTemplate.EnableServiceLinks,
			PriorityClassName:            priorityClassName,
			ImagePullSecrets:             podTemplate.ImagePullSecrets,
			HostAliases:                  podTemplate.HostAliases,
			TopologySpreadConstraints:    podTemplate.TopologySpreadConstraints,
			ActiveDeadlineSeconds:        &activeDeadlineSeconds, // Set ActiveDeadlineSeconds to mark the pod as "terminating" (like a Job)
		},
	}
	// Apply caller-supplied transformers last, in order.
	for _, f := range transformers {
		newPod, err = f(newPod)
		if err != nil {
			return newPod, err
		}
	}
	return newPod, nil
}
// makeLabels constructs the labels we will propagate from TaskRuns to Pods.
// TaskRun labels are copied first; the Tekton-owned keys (taskrun name/UID
// and managed-by) are written afterwards so they always win over any
// identically-named labels on the TaskRun.
func makeLabels(s *v1.TaskRun, defaultManagedByLabelValue string) map[string]string {
	out := make(map[string]string, len(s.ObjectMeta.Labels)+1)
	for key, val := range s.ObjectMeta.Labels {
		out[key] = val
	}
	// Set *after* copying so these override anything the TaskRun specified.
	out[pipeline.TaskRunLabelKey] = s.Name
	out[pipeline.TaskRunUIDLabelKey] = string(s.UID)
	// Enforce app.kubernetes.io/managed-by to be the configured value.
	out[tknreconciler.KubernetesManagedByAnnotationKey] = defaultManagedByLabelValue
	return out
}
// isPodReadyImmediately returns a bool indicating whether the
// controller should consider the Pod "Ready" as soon as it's deployed.
// This will add the `Ready` annotation when creating the Pod,
// and prevent the first step from waiting for the annotation to appear before starting.
func isPodReadyImmediately(featureFlags config.FeatureFlags, sidecars []v1.Sidecar) bool {
	// Sidecars (declared or injected by the environment) normally require
	// waiting before the first step may start.
	mustWaitForSidecars := len(sidecars) > 0 || featureFlags.RunningInEnvWithInjectedSidecars
	if mustWaitForSidecars {
		if featureFlags.AwaitSidecarReadiness {
			return false
		}
		log.Printf("warning: not waiting for sidecars before starting first Step of Task pod")
	}
	return true
}
// runMount returns the VolumeMount for step i's /tekton/run/<i> state
// directory; ro controls whether the mount is read-only (true for every
// step other than the owner).
func runMount(i int, ro bool) corev1.VolumeMount {
	var vm corev1.VolumeMount
	vm.Name = fmt.Sprintf("%s-%d", runVolumeName, i)
	vm.MountPath = filepath.Join(RunDir, strconv.Itoa(i))
	vm.ReadOnly = ro
	return vm
}
// runVolume returns the EmptyDir volume backing step i's /tekton/run state.
func runVolume(i int) corev1.Volume {
	src := corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}
	return corev1.Volume{
		Name:         fmt.Sprintf("%s-%d", runVolumeName, i),
		VolumeSource: src,
	}
}
// entrypointInitContainer generates the "prepare" init container that copies
// the entrypoint binary into the shared /tekton/bin volume and initializes the
// per-step folders under /tekton/steps, given the entrypoint image, the steps,
// and whether the pod runs on a windows node.
// If securityContext.SetSecurityContext is true, the container carries a
// security context allowing it to run in namespaces with restricted pod
// security admission.
func entrypointInitContainer(image string, steps []v1.Step, securityContext SecurityContextConfig, windows bool) corev1.Container {
	// Invoke the entrypoint binary in "cp mode": it copies itself into
	// place and creates a folder per step name passed after it.
	args := []string{"/ko-app/entrypoint", "init", "/ko-app/entrypoint", entrypointBinary}
	for idx, step := range steps {
		args = append(args, StepName(step.Name, idx))
	}
	prepare := corev1.Container{
		Name:  "prepare",
		Image: image,
		// Rewrite default WorkingDir from "/home/nonroot" to "/"
		// as suggested at https://github.com/GoogleContainerTools/distroless/issues/718
		// to avoid permission errors with nonroot users not equal to `65532`
		WorkingDir:   "/",
		Command:      args,
		VolumeMounts: []corev1.VolumeMount{binMount, internalStepsMount},
	}
	if securityContext.SetSecurityContext {
		prepare.SecurityContext = securityContext.GetSecurityContext(windows)
	}
	return prepare
}
// createResultsSidecar creates a sidecar that will run the sidecarlogresults binary,
// based on the spec of the Task, the image that should run in the results sidecar,
// whether it will run on a windows node, and whether the sidecar should include a security context
// that will allow it to run in namespaces with "restricted" pod security admission.
// It will also provide arguments to the binary that allow it to surface the step results.
func createResultsSidecar(taskSpec v1.TaskSpec, image string, securityContext SecurityContextConfig, windows bool, pollingInterval time.Duration) (v1.Sidecar, error) {
	// Task-level result names, surfaced via -result-names.
	names := make([]string, 0, len(taskSpec.Results))
	for _, r := range taskSpec.Results {
		names = append(names, r.Name)
	}
	// Only steps that reference $(step.artifacts.path) are passed via
	// -step-names, so the sidecar watches their artifact files.
	// (A previously-built list of *all* step names was dead code and has
	// been removed.)
	var artifactProducerSteps []string
	for _, s := range taskSpec.Steps {
		if artifactPathReferencedInStep(s) {
			artifactProducerSteps = append(artifactProducerSteps, GetContainerName(s.Name))
		}
	}
	resultsStr := strings.Join(names, ",")
	command := []string{"/ko-app/sidecarlogresults", "-results-dir", pipeline.DefaultResultPath, "-result-names", resultsStr, "-step-names", strings.Join(artifactProducerSteps, ",")}
	// create a map of container Name to step results
	stepResults := map[string][]string{}
	for i, s := range taskSpec.Steps {
		if len(s.Results) > 0 {
			stepName := StepName(s.Name, i)
			stepResults[stepName] = make([]string, 0, len(s.Results))
			for _, r := range s.Results {
				stepResults[stepName] = append(stepResults[stepName], r.Name)
			}
		}
	}
	stepResultsBytes, err := json.Marshal(stepResults)
	if err != nil {
		return v1.Sidecar{}, err
	}
	// NOTE: json.Marshal of an empty map yields "{}" (len 2), so this branch
	// is effectively always taken.
	if len(stepResultsBytes) > 0 {
		command = append(command, "-step-results", string(stepResultsBytes))
	}
	// When using Kubernetes native sidecar support, add the kubernetes-sidecar-mode flag
	// to prevent the sidecar from exiting after processing results.
	// NOTE(review): this reads feature flags from context.Background(), which
	// carries no config and therefore always yields defaults — presumably the
	// caller's ctx was intended; confirm before relying on this flag here.
	if config.FromContextOrDefaults(context.Background()).FeatureFlags.EnableKubernetesSidecar {
		command = append(command, "-kubernetes-sidecar-mode", "true")
	}
	sidecar := v1.Sidecar{
		Name:    pipeline.ReservedResultsSidecarName,
		Image:   image,
		Command: command,
		Env: []corev1.EnvVar{
			{
				// How often the sidecar polls result files for changes.
				Name:  "SIDECAR_LOG_POLLING_INTERVAL",
				Value: pollingInterval.String(),
			},
		},
	}
	if securityContext.SetSecurityContext {
		sidecar.SecurityContext = securityContext.GetSecurityContext(windows)
	}
	return sidecar, nil
}
// usesWindows returns true if the TaskRun will run on a windows node,
// based on its node selector.
// See https://kubernetes.io/docs/concepts/windows/user-guide/ for more info.
func usesWindows(tr *v1.TaskRun) bool {
	pt := tr.Spec.PodTemplate
	if pt == nil || pt.NodeSelector == nil {
		return false
	}
	return pt.NodeSelector[OsSelectorLabel] == "windows"
}
// artifactsPathReferenced reports whether any step references the step
// artifacts path (resolved or unresolved form).
func artifactsPathReferenced(steps []v1.Step) bool {
	return slices.ContainsFunc(steps, artifactPathReferencedInStep)
}
// artifactPathReferencedInStep reports whether the step references its
// artifacts path in its script, args, command, or env values.
func artifactPathReferencedInStep(step v1.Step) bool {
	// `$(step.artifacts.path)` in taskRun.Spec.TaskSpec.Steps and `taskSpec.steps` are substituted when building the pod while when setting status for taskRun
	// neither of them is substituted, so we need two forms to check if artifactsPath is referenced in steps.
	unresolvedPath := "$(" + artifactref.StepArtifactPathPattern + ")"
	resolvedPath := filepath.Join(pipeline.StepsDir, GetContainerName(step.Name), "artifacts", "provenance.json")
	// mentions reports whether s contains either form of the path.
	mentions := func(s string) bool {
		return strings.Contains(s, resolvedPath) || strings.Contains(s, unresolvedPath)
	}
	if mentions(step.Script) {
		return true
	}
	for _, arg := range step.Args {
		if mentions(arg) {
			return true
		}
	}
	for _, c := range step.Command {
		if mentions(c) {
			return true
		}
	}
	for _, e := range step.Env {
		if mentions(e.Value) {
			return true
		}
	}
	return false
}
// IsNativeSidecarSupport returns true if the k8s API has native sidecar support
// based on the k8s version (1.29+).
// (Doc comment fixed to start with the exported identifier, per Go doc
// conventions.)
// See https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/ for more info.
func IsNativeSidecarSupport(serverVersion *version.Info) bool {
	minor := strings.TrimSuffix(serverVersion.Minor, "+") // Remove '+' if present
	// Parse errors are deliberately ignored: an unparseable component falls
	// back to 0, which fails the version check and keeps the legacy Tekton
	// sidecar behavior (the conservative choice).
	majorInt, _ := strconv.Atoi(serverVersion.Major)
	minorInt, _ := strconv.Atoi(minor)
	return majorInt > 1 || (majorInt == 1 && minorInt >= SidecarK8sMinorVersionCheck)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"encoding/base64"
"fmt"
"path/filepath"
"strconv"
"strings"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/names"
corev1 "k8s.io/api/core/v1"
)
const (
	// scriptsVolumeName is the EmptyDir volume holding generated step and
	// sidecar scripts.
	scriptsVolumeName = "tekton-internal-scripts"
	// debugScriptsVolumeName holds the debug helper scripts
	// (continue / fail-continue, etc.).
	debugScriptsVolumeName = "tekton-internal-debug-scripts"
	// debugInfoVolumeName holds per-step debug state directories.
	debugInfoVolumeName = "tekton-internal-debug-info"
	// scriptsDir is where scripts are mounted in step/sidecar containers.
	scriptsDir = "/tekton/scripts"
	// debugScriptsDir is where debug scripts are mounted.
	debugScriptsDir = "/tekton/debug/scripts"
	// defaultScriptPreamble is prepended to scripts with no shebang.
	defaultScriptPreamble = "#!/bin/sh\nset -e\n"
	// debugInfoDir is the root of the per-step debug info mounts.
	debugInfoDir = "/tekton/debug/info"
)
var (
	// Volume definition attached to Pods generated from TaskRuns that have
	// steps that specify a Script.
	scriptsVolume = corev1.Volume{
		Name:         scriptsVolumeName,
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}
	// scriptsVolumeMount is the read-only mount used by step/sidecar
	// containers that execute the placed scripts.
	scriptsVolumeMount = corev1.VolumeMount{
		Name:      scriptsVolumeName,
		MountPath: scriptsDir,
		ReadOnly:  true,
	}
	// writeScriptsVolumeMount is the writable mount used by the
	// place-scripts init container to create the script files.
	writeScriptsVolumeMount = corev1.VolumeMount{
		Name:      scriptsVolumeName,
		MountPath: scriptsDir,
		ReadOnly:  false,
	}
	// debugScriptsVolume backs the debug helper scripts.
	debugScriptsVolume = corev1.Volume{
		Name:         debugScriptsVolumeName,
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}
	// debugScriptsVolumeMount mounts the debug scripts into containers.
	debugScriptsVolumeMount = corev1.VolumeMount{
		Name:      debugScriptsVolumeName,
		MountPath: debugScriptsDir,
	}
	// debugInfoVolume backs the per-step debug state directories.
	debugInfoVolume = corev1.Volume{
		Name:         debugInfoVolumeName,
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}
)
// convertScripts creates an init container that mounts any Scripts specified by
// the input Steps and Sidecars. It returns the init container (nil if no
// scripts are needed), plus two slices of Containers representing the Steps
// and Sidecars, respectively, that use the scripts from the init container.
// Other inputs:
// - shellImageLinux and shellImageWin: the images that should be used by the init container,
// depending on the OS the Task will run on
// - debugConfig: the TaskRun's debug configuration
// - securityContext: whether the init container should include a security context that will
// allow it to run in a namespace with "restricted" pod security admission
func convertScripts(shellImageLinux string, shellImageWin string, steps []v1.Step, sidecars []v1.Sidecar, debugConfig *v1.TaskRunDebug, securityContext SecurityContextConfig) (*corev1.Container, []corev1.Container, []corev1.Container) {
	requiresWindows := checkWindowsRequirement(steps, sidecars)

	// Default to the linux shell; switch image/command/arg for windows.
	image, command, arg := shellImageLinux, "sh", "-c"
	if requiresWindows {
		image, command, arg = shellImageWin, "pwsh", "-Command"
	}

	// place-scripts accumulates, in Args[1], a shell script that writes each
	// step/sidecar script into the /tekton/scripts volume for later execution.
	placeScriptsInit := corev1.Container{
		Name:         "place-scripts",
		Image:        image,
		Command:      []string{command},
		Args:         []string{arg, ""},
		VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount},
	}
	if securityContext.SetSecurityContext {
		placeScriptsInit.SecurityContext = securityContext.GetSecurityContext(requiresWindows)
	}
	// Debug needs an extra writable mount for the debug scripts.
	if debugConfig != nil && debugConfig.NeedsDebug() {
		placeScriptsInit.VolumeMounts = append(placeScriptsInit.VolumeMounts, debugScriptsVolumeMount)
	}

	// Steps first, then sidecars: both append to placeScriptsInit.Args[1],
	// so this order matters.
	stepContainers := convertListOfSteps(steps, &placeScriptsInit, debugConfig, "script")
	sidecarContainers := convertListOfSidecars(sidecars, &placeScriptsInit, "sidecar-script")

	// Only emit the init container when there is actually something to place.
	if !hasScripts(steps, sidecars, debugConfig) {
		return nil, stepContainers, sidecarContainers
	}
	return &placeScriptsInit, stepContainers, sidecarContainers
}
// convertListOfSidecars iterates through the list of sidecars, generates the
// script file name, adds an entry to the init container args, sets up the
// sidecar container to run the script, and sets the volume mounts.
func convertListOfSidecars(sidecars []v1.Sidecar, initContainer *corev1.Container, namePrefix string) []corev1.Container {
	out := []corev1.Container{}
	for idx, sidecar := range sidecars {
		c := sidecar.ToK8sContainer()
		if script := sidecar.Script; script != "" {
			file := getScriptFile(scriptsDir, fmt.Sprintf("%s-%d", namePrefix, idx))
			placeScriptInContainer(script, file, c, initContainer)
		}
		out = append(out, *c)
	}
	return out
}
// convertListOfSteps iterates through the list of steps, generates the script
// file name, adds an entry to the init container args, sets up the step
// container to run the script, and sets the volume mounts. Debug scripts, if
// configured, are then placed into all of the resulting containers.
func convertListOfSteps(steps []v1.Step, initContainer *corev1.Container, debugConfig *v1.TaskRunDebug, namePrefix string) []corev1.Container {
	out := []corev1.Container{}
	for idx := range steps {
		c := steps[idx].ToK8sContainer()
		if script := steps[idx].Script; script != "" {
			file := getScriptFile(scriptsDir, fmt.Sprintf("%s-%d", namePrefix, idx))
			placeScriptInContainer(script, file, c, initContainer)
		}
		out = append(out, *c)
	}
	placeDebugScriptInContainers(out, initContainer, debugConfig)
	return out
}
// getScriptFile returns a path under scriptsDir for scriptName, with a random
// suffix appended (and length restricted) to avoid collisions.
func getScriptFile(scriptsDir, scriptName string) string {
	unique := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(scriptName)
	return filepath.Join(scriptsDir, unique)
}
// placeScriptInContainer given a piece of script to be executed, placeScriptInContainer firstly modifies initContainer
// so that it capsules the target script into scriptFile, then it modifies the container so that it can execute the scriptFile
// in runtime.
func placeScriptInContainer(script, scriptFile string, c *corev1.Container, initContainer *corev1.Container) {
	if script == "" {
		return
	}
	cleaned := strings.TrimSpace(script)
	hasShebang := strings.HasPrefix(cleaned, "#!")
	requiresWindows := strings.HasPrefix(cleaned, "#!win")
	// Scripts with no shebang get the default "#!/bin/sh\nset -e\n" preamble.
	if !hasShebang {
		script = defaultScriptPreamble + script
	}
	// Append to the place-scripts script to place the
	// script file in a known location in the scripts volume.
	if requiresWindows {
		// Windows: write the script via a PowerShell here-string; the step's
		// command/args are derived from the script's shebang components.
		command, args, script, scriptFile := extractWindowsScriptComponents(script, scriptFile)
		initContainer.Args[1] += fmt.Sprintf(`@"
%s
"@ | Out-File -FilePath %s
`, script, scriptFile)
		c.Command = command
		// Append existing args field to end of derived args
		args = append(args, c.Args...)
		c.Args = args
	} else {
		// Only encode the script for linux scripts
		// The decode-script subcommand of the entrypoint does not work under windows
		script = encodeScript(script)
		heredoc := "_EOF_" // underscores because base64 doesn't include them in its alphabet
		// Emit shell that writes the base64 payload via heredoc, marks it
		// executable, then decodes it in place with the entrypoint binary.
		initContainer.Args[1] += fmt.Sprintf(`scriptfile="%s"
touch ${scriptfile} && chmod +x ${scriptfile}
cat > ${scriptfile} << '%s'
%s
%s
/tekton/bin/entrypoint decode-script "${scriptfile}"
`, scriptFile, heredoc, script, heredoc)
		// Set the command to execute the correct script in the mounted volume.
		// A previous merge with stepTemplate may have populated
		// Command and Args, even though this is not normally valid, so
		// we'll clear out the Args and overwrite Command.
		// NOTE(review): despite the comment above, c.Args is NOT cleared
		// here — only Command is overwritten; confirm whether Args should
		// be reset to nil.
		c.Command = []string{scriptFile}
	}
	// Every script-bearing container reads scripts from the shared volume.
	c.VolumeMounts = append(c.VolumeMounts, scriptsVolumeMount)
}
// encodeScript encodes a script field into a format that avoids kubernetes'
// built-in processing of container args, which can mangle dollar signs and
// unexpectedly replace variable references in the user's script.
// The payload is standard base64; the entrypoint's decode-script subcommand
// reverses it at runtime.
func encodeScript(script string) string {
	raw := []byte(script)
	return base64.StdEncoding.EncodeToString(raw)
}
// placeDebugScriptInContainers inserts debug scripts into containers. It capsules those scripts to files in initContainer,
// then executes those scripts in target containers.
// No-op unless debugConfig requests debugging.
func placeDebugScriptInContainers(containers []corev1.Container, initContainer *corev1.Container, debugConfig *v1.TaskRunDebug) {
	if debugConfig == nil || !debugConfig.NeedsDebug() {
		return
	}
	// debugConfig is known non-nil past the guard above, so the repeated
	// nil checks the original carried here were redundant and are removed.
	isDebugOnFailure := debugConfig.NeedsDebugOnFailure()
	var needDebugBeforeStep bool
	// Mount the debug scripts volume plus a per-container debug info
	// directory into every container.
	for i := range containers {
		debugInfoVolumeMount := corev1.VolumeMount{
			Name:      debugInfoVolumeName,
			MountPath: filepath.Join(debugInfoDir, strconv.Itoa(i)),
		}
		containers[i].VolumeMounts = append(containers[i].VolumeMounts, debugScriptsVolumeMount, debugInfoVolumeMount)
		if debugConfig.NeedsDebugBeforeStep(containers[i].Name) {
			needDebugBeforeStep = true
		}
	}
	// script pairs a debug helper's name with its shell content.
	type script struct {
		name    string
		content string
	}
	debugScripts := make([]script, 0)
	if isDebugOnFailure {
		debugScripts = append(debugScripts, []script{{
			name:    "continue",
			content: defaultScriptPreamble + fmt.Sprintf(debugContinueScriptTemplate, len(containers), debugInfoDir, RunDir),
		}, {
			name:    "fail-continue",
			content: defaultScriptPreamble + fmt.Sprintf(debugFailScriptTemplate, len(containers), debugInfoDir, RunDir),
		}}...)
	}
	if needDebugBeforeStep {
		debugScripts = append(debugScripts, []script{{
			name:    "beforestep-continue",
			content: defaultScriptPreamble + fmt.Sprintf(debugBeforeStepContinueScriptTemplate, len(containers), debugInfoDir, RunDir),
		}, {
			name:    "beforestep-fail-continue",
			content: defaultScriptPreamble + fmt.Sprintf(debugBeforeStepFailScriptTemplate, len(containers), debugInfoDir, RunDir),
		}}...)
	}
	// Add debug or breakpoint related scripts to /tekton/debug/scripts
	// Iterate through the debugScripts and add routine for each of them in the initContainer for their creation
	for _, debugScript := range debugScripts {
		tmpFile := filepath.Join(debugScriptsDir, fmt.Sprintf("%s-%s", "debug", debugScript.name))
		heredoc := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(fmt.Sprintf("%s-%s-heredoc-randomly-generated", "debug", debugScript.name))
		initContainer.Args[1] += fmt.Sprintf(initScriptDirective, tmpFile, heredoc, debugScript.content, heredoc)
	}
}
// hasScripts determines if we need to generate scripts in InitContainer given steps, sidecars and breakpoints.
func hasScripts(steps []v1.Step, sidecars []v1.Sidecar, debugConfig *v1.TaskRunDebug) bool {
	for _, step := range steps {
		if step.Script != "" {
			return true
		}
	}
	for _, sidecar := range sidecars {
		if sidecar.Script != "" {
			return true
		}
	}
	if debugConfig == nil {
		return false
	}
	return debugConfig.NeedsDebug()
}
// checkWindowsRequirement reports whether any step or sidecar script carries
// a windows shebang ("#!win"), i.e. the workload needs a Windows node.
func checkWindowsRequirement(steps []v1.Step, sidecars []v1.Sidecar) bool {
	isWindowsScript := func(script string) bool {
		return strings.HasPrefix(strings.TrimSpace(script), "#!win")
	}
	// Detect windows shebangs in steps first.
	for _, step := range steps {
		if isWindowsScript(step.Script) {
			return true
		}
	}
	// If no step needs windows, then check sidecars to be sure.
	for _, sidecar := range sidecars {
		if isWindowsScript(sidecar.Script) {
			return true
		}
	}
	return false
}
// extractWindowsScriptComponents derives the container command, args, script
// body, and final file name for a windows script from its shebang line.
// A shebang of the form "#!win <interpreter> [flags...]" runs the named
// interpreter against the script file (adding a .ps1 extension for
// powershell); a bare "#!win" produces a .cmd file that is executed directly,
// with the shebang line stripped from the script body.
func extractWindowsScriptComponents(script string, fileName string) ([]string, []string, string, string) {
	lines := strings.Split(script, "\n")
	tokens := strings.Split(lines[0], " ")
	var command, args []string
	if len(tokens) > 1 {
		interpreterAndFlags := tokens[1:]
		command = interpreterAndFlags[0:1]
		// Handle legacy powershell limitation
		if strings.HasPrefix(command[0], "powershell") {
			fileName += ".ps1"
		}
		if len(interpreterAndFlags) > 1 {
			args = append(interpreterAndFlags[1:], fileName)
		} else {
			args = []string{fileName}
		}
	} else {
		// No interpreter specified: strip the shebang and execute the
		// resulting .cmd file directly.
		fileName += ".cmd"
		script = strings.Join(lines[1:], "\n")
		command = []string{fileName}
		args = []string{}
	}
	return command, args, script, fileName
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
corev1 "k8s.io/api/core/v1"
)
var (
	// allowPrivilegeEscalation, runAsNonRoot and readOnlyRootFilesystem are
	// addressable bools used in the security context of pod init containers
	// (the corev1 SecurityContext API takes *bool fields).
	allowPrivilegeEscalation = false
	runAsNonRoot             = true
	readOnlyRootFilesystem   = true
	// LinuxSecurityContext allow init containers to run in namespaces
	// with "restricted" pod security admission
	// See https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
	LinuxSecurityContext = &corev1.SecurityContext{
		AllowPrivilegeEscalation: &allowPrivilegeEscalation,
		Capabilities: &corev1.Capabilities{
			Drop: []corev1.Capability{"ALL"},
		},
		RunAsNonRoot: &runAsNonRoot,
		SeccompProfile: &corev1.SeccompProfile{
			Type: corev1.SeccompProfileTypeRuntimeDefault,
		},
	}
	// WindowsSecurityContext adds securityContext that is supported by Windows OS.
	// Only RunAsNonRoot is set here; the capability/seccomp fields used in the
	// Linux context above are omitted.
	WindowsSecurityContext = &corev1.SecurityContext{
		RunAsNonRoot: &runAsNonRoot,
	}
)
// SecurityContextConfig is configuration for setting security context for init containers and affinity assistant container.
type SecurityContextConfig struct {
	SetSecurityContext        bool // whether a security context should be applied at all
	SetReadOnlyRootFilesystem bool // additionally request a read-only root filesystem (honored on Linux only; see GetSecurityContext)
}
// GetSecurityContext returns the security context for the given OS: the
// Windows context for Windows pods, otherwise the restricted Linux context,
// with ReadOnlyRootFilesystem layered on via a deep copy when configured.
func (c SecurityContextConfig) GetSecurityContext(isWindows bool) *corev1.SecurityContext {
	switch {
	case isWindows:
		return WindowsSecurityContext
	case !c.SetReadOnlyRootFilesystem:
		return LinuxSecurityContext
	}
	// Copy before mutating so the shared package-level context stays pristine.
	sc := LinuxSecurityContext.DeepCopy()
	sc.ReadOnlyRootFilesystem = &readOnlyRootFilesystem
	return sc
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/tektoncd/pipeline/internal/sidecarlogresults"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/result"
"github.com/tektoncd/pipeline/pkg/termination"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/apis"
)
// Aliased for backwards compatibility; do not add additional TaskRun reasons here
var (
	// ReasonFailedResolution indicates that the reason for failure status is
	// that references within the TaskRun could not be resolved
	ReasonFailedResolution = v1.TaskRunReasonFailedResolution.String()
	// ReasonFailedValidation indicates that the reason for failure status is
	// that taskrun failed runtime validation
	ReasonFailedValidation = v1.TaskRunReasonFailedValidation.String()
	// ReasonTaskFailedValidation indicates that the reason for failure status is
	// that task failed runtime validation
	ReasonTaskFailedValidation = v1.TaskRunReasonTaskFailedValidation.String()
	// ReasonResourceVerificationFailed indicates that the task fails the trusted resource verification,
	// it could be the content has changed, signature is invalid or public key is invalid
	ReasonResourceVerificationFailed = v1.TaskRunReasonResourceVerificationFailed.String()
)
const (
	// ReasonExceededResourceQuota indicates that the TaskRun failed to create a pod due to
	// a ResourceQuota in the namespace
	ReasonExceededResourceQuota = "ExceededResourceQuota"
	// ReasonExceededNodeResources indicates that the TaskRun's pod has failed to start due
	// to resource constraints on the node
	ReasonExceededNodeResources = "ExceededNodeResources"
	// ReasonPullImageFailed indicates that the TaskRun's pod failed to pull image
	ReasonPullImageFailed = "PullImageFailed"
	// ReasonCreateContainerConfigError indicates that the TaskRun failed to create a pod due to
	// config error of container
	ReasonCreateContainerConfigError = "CreateContainerConfigError"
	// ReasonPodCreationFailed indicates that the reason for the current condition
	// is that the creation of the pod backing the TaskRun failed
	ReasonPodCreationFailed = "PodCreationFailed"
	// ReasonPodAdmissionFailed indicates that the TaskRun's pod failed to pass admission validation
	ReasonPodAdmissionFailed = "PodAdmissionFailed"
	// ReasonPodPending indicates that the pod is in corev1.Pending, and the reason is not
	// ReasonExceededNodeResources or isPodHitConfigError
	ReasonPodPending = "Pending"
	// timeFormat is RFC3339 with millisecond
	timeFormat = "2006-01-02T15:04:05.000Z07:00"
)
const (
	// oomKilled is the container termination reason reported when a container
	// was killed by the kernel OOM killer.
	oomKilled = "OOMKilled"
	// evicted is the pod status reason reported when a pod was evicted from its node.
	evicted = "Evicted"
)
// SidecarsReady returns true if all of the Pod's sidecars are Ready or
// Terminated.
func SidecarsReady(podStatus corev1.PodStatus) bool {
	if podStatus.Phase != corev1.PodRunning {
		return false
	}
	for _, cs := range podStatus.ContainerStatuses {
		// Skip step containers. We cannot rely on a "sidecar-" name prefix
		// because injected sidecars might not carry one, so anything that is
		// not a step container is treated as a sidecar here.
		if IsContainerStep(cs.Name) {
			continue
		}
		runningAndReady := cs.State.Running != nil && cs.Ready
		terminated := cs.State.Terminated != nil
		if !runningAndReady && !terminated {
			return false
		}
	}
	return true
}
// MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status.
//
// It marks the run as running when no terminal succeeded-condition exists yet,
// determines completeness from container/pod state (also requiring completed
// init containers when the kubernetes-sidecar feature flag is on), splits
// container statuses into steps and sidecars, and delegates per-step result
// extraction and per-sidecar state translation to helpers. The returned error
// aggregates failures from the step-status extraction.
func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1.TaskRun, pod *corev1.Pod, kubeclient kubernetes.Interface, ts *v1.TaskSpec) (v1.TaskRunStatus, error) {
	trs := &tr.Status
	if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown {
		// If the taskRunStatus doesn't exist yet, it's because we just started running
		markStatusRunning(trs, v1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
	}
	// Align container statuses with the pod spec's container order before
	// deriving step states from them.
	sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers)
	complete := areContainersCompleted(ctx, pod) || isPodCompleted(pod)
	// When EnableKubernetesSidecar is true, we need to ensure all init containers
	// are completed before considering the taskRun complete, in addition to the regular containers.
	// This is because sidecars in Kubernetes can keep running after the main containers complete.
	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableKubernetesSidecar {
		complete = complete && areInitContainersCompleted(ctx, pod)
	}
	if complete {
		// Honor the pipeline-task onError annotation (if present) so ignored
		// failures get reported with the corresponding reason.
		onError, ok := tr.Annotations[v1.PipelineTaskOnErrorAnnotation]
		if ok {
			updateCompletedTaskRunStatus(logger, trs, pod, v1.PipelineTaskOnErrorType(onError))
		} else {
			updateCompletedTaskRunStatus(logger, trs, pod, "")
		}
	} else {
		updateIncompleteTaskRunStatus(trs, pod)
	}
	trs.PodName = pod.Name
	trs.Sidecars = []v1.SidecarState{}
	// Partition container statuses into steps and sidecars by container name.
	var stepStatuses []corev1.ContainerStatus
	var sidecarStatuses []corev1.ContainerStatus
	for _, s := range pod.Status.ContainerStatuses {
		if IsContainerStep(s.Name) {
			stepStatuses = append(stepStatuses, s)
		} else if IsContainerSidecar(s.Name) {
			sidecarStatuses = append(sidecarStatuses, s)
		}
	}
	// Sidecar containers may also appear among init containers — presumably
	// kubernetes-native sidecars; confirm against the pod-construction code.
	for _, s := range pod.Status.InitContainerStatuses {
		if IsContainerSidecar(s.Name) {
			sidecarStatuses = append(sidecarStatuses, s)
		}
	}
	err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr, pod.Status.Phase, kubeclient, ts)
	setTaskRunStatusBasedOnSidecarStatus(sidecarStatuses, trs)
	trs.Results = removeDuplicateResults(trs.Results)
	return *trs, err
}
// createTaskResultsFromStepResults maps step results onto the task results
// that fetch them: every step result whose name appears in neededStepResults
// is re-emitted as a TaskRunResult under the task-level result name.
func createTaskResultsFromStepResults(stepRunRes []v1.TaskRunStepResult, neededStepResults map[string]string) []v1.TaskRunResult {
	taskResults := []v1.TaskRunResult{}
	for _, stepResult := range stepRunRes {
		// this result was requested by the Task
		taskResultName, wanted := neededStepResults[stepResult.Name]
		if !wanted {
			continue
		}
		taskResults = append(taskResults, v1.TaskRunResult{
			Name:  taskResultName,
			Type:  stepResult.Type,
			Value: stepResult.Value,
		})
	}
	return taskResults
}
// setTaskRunArtifactsFromRunResult unmarshals the first TaskRun-artifacts run
// result (if any) into artifacts. Other result types are ignored.
func setTaskRunArtifactsFromRunResult(runResults []result.RunResult, artifacts *v1.Artifacts) error {
	for _, rr := range runResults {
		if rr.ResultType != result.TaskRunArtifactsResultType {
			continue
		}
		return json.Unmarshal([]byte(rr.Value), artifacts)
	}
	return nil
}
// getTaskResultsFromSidecarLogs filters runResults down to task-level results.
func getTaskResultsFromSidecarLogs(runResults []result.RunResult) []result.RunResult {
	taskResults := []result.RunResult{}
	for _, rr := range runResults {
		if rr.ResultType == result.TaskRunResultType {
			taskResults = append(taskResults, rr)
		}
	}
	return taskResults
}
// getStepResultsFromSidecarLogs returns the step results that belong to the
// given container, rewriting each result's key from the combined sidecar
// form ("<step>.<result>") back to the bare result name.
func getStepResultsFromSidecarLogs(sidecarLogResults []result.RunResult, containerName string) ([]result.RunResult, error) {
	stepResults := []result.RunResult{}
	for _, slr := range sidecarLogResults {
		if slr.ResultType != result.StepResultType {
			continue
		}
		stepName, resultName, err := sidecarlogresults.ExtractStepAndResultFromSidecarResultName(slr.Key)
		if err != nil {
			return []result.RunResult{}, err
		}
		if stepName != containerName {
			continue
		}
		slr.Key = resultName
		stepResults = append(stepResults, slr)
	}
	return stepResults, nil
}
// setTaskRunStatusBasedOnStepStatus populates tr.Status from the given step
// container statuses: it extracts results and artifacts (from sidecar logs
// and/or container termination messages), rewrites termination messages to
// drop internal entries, and builds the ordered StepState list. All errors
// encountered along the way are joined and returned so callers can surface
// every extraction failure at once.
func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1.TaskRun, podPhase corev1.PodPhase, kubeclient kubernetes.Interface, ts *v1.TaskSpec) error {
	trs := &tr.Status
	var errs []error
	// collect results from taskrun spec and taskspec
	specResults := []v1.TaskResult{}
	if tr.Spec.TaskSpec != nil {
		specResults = append(specResults, tr.Spec.TaskSpec.Results...)
	}
	if ts != nil {
		specResults = append(specResults, ts.Results...)
	}
	// Extract results from sidecar logs
	sidecarLogsResultsEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.ResultExtractionMethod == config.ResultExtractionMethodSidecarLogs
	// temporary solution to check if artifacts sidecar created in taskRun as we don't have the api for users to declare if a step/task is producing artifacts yet
	// BUGFIX: guard against a nil TaskSpec — the rest of this function treats
	// ts as optional (see the nil checks above and in the step loop), so
	// dereferencing it here unconditionally risked a nil-pointer panic.
	artifactsSidecarCreated := ts != nil && artifactsPathReferenced(ts.Steps)
	sidecarLogResults := []result.RunResult{}
	if sidecarLogsResultsEnabled {
		// extraction of results from sidecar logs
		if tr.Status.TaskSpec.Results != nil || artifactsSidecarCreated {
			slr, err := sidecarlogresults.GetResultsFromSidecarLogs(ctx, kubeclient, tr.Namespace, tr.Status.PodName, pipeline.ReservedResultsSidecarContainerName, podPhase)
			if err != nil {
				errs = append(errs, err)
			}
			sidecarLogResults = append(sidecarLogResults, slr...)
		}
	}
	// Populate Task results from sidecar logs
	taskResultsFromSidecarLogs := getTaskResultsFromSidecarLogs(sidecarLogResults)
	taskResults, _, _ := filterResults(taskResultsFromSidecarLogs, specResults, nil)
	if tr.IsDone() {
		trs.Results = append(trs.Results, taskResults...)
		var tras v1.Artifacts
		err := setTaskRunArtifactsFromRunResult(sidecarLogResults, &tras)
		if err != nil {
			logger.Errorf("Failed to set artifacts value from sidecar logs: %v", err)
			errs = append(errs, err)
		} else {
			trs.Artifacts = &tras
		}
	}
	// Build a lookup map for step state provenances.
	stepStateProvenances := make(map[string]*v1.Provenance)
	for _, ss := range trs.Steps {
		stepStateProvenances[ss.Name] = ss.Provenance
	}
	// Continue with extraction of termination messages
	orderedStepStates := make([]v1.StepState, len(stepStatuses))
	for i, s := range stepStatuses {
		// Avoid changing the original value by modifying the pointer value.
		state := s.State.DeepCopy()
		taskRunStepResults := []v1.TaskRunStepResult{}
		// Identify Step Results
		stepResults := []v1.StepResult{}
		if ts != nil {
			for _, step := range ts.Steps {
				if GetContainerName(step.Name) == s.Name {
					stepResults = append(stepResults, step.Results...)
				}
			}
		}
		// Identify StepResults needed by the Task Results
		neededStepResults, err := findStepResultsFetchedByTask(s.Name, specResults)
		if err != nil {
			errs = append(errs, err)
		}
		// populate step results from sidecar logs
		stepResultsFromSidecarLogs, err := getStepResultsFromSidecarLogs(sidecarLogResults, s.Name)
		if err != nil {
			errs = append(errs, err)
		}
		_, stepRunRes, _ := filterResults(stepResultsFromSidecarLogs, specResults, stepResults)
		if tr.IsDone() {
			taskRunStepResults = append(taskRunStepResults, stepRunRes...)
			// Set TaskResults from StepResults
			trs.Results = append(trs.Results, createTaskResultsFromStepResults(stepRunRes, neededStepResults)...)
		}
		var sas v1.Artifacts
		err = setStepArtifactsValueFromSidecarLogResult(sidecarLogResults, s.Name, &sas)
		if err != nil {
			logger.Errorf("Failed to set artifacts value from sidecar logs: %v", err)
			errs = append(errs, err)
		}
		// Parse termination messages
		terminationReason := ""
		if state.Terminated != nil && len(state.Terminated.Message) != 0 {
			msg := state.Terminated.Message
			results, err := termination.ParseMessage(logger, msg)
			if err != nil {
				// BUGFIX: message previously read "parsed sas JSON" (typo).
				logger.Errorf("termination message could not be parsed as JSON: %v", err)
				errs = append(errs, err)
			} else {
				err := setStepArtifactsValueFromTerminationMessageRunResult(results, &sas)
				if err != nil {
					logger.Errorf("error setting step artifacts of step %q in taskrun %q: %v", s.Name, tr.Name, err)
					errs = append(errs, err)
				}
				// Renamed from "time", which shadowed the time package.
				startedAt, err := extractStartedAtTimeFromResults(results)
				if err != nil {
					logger.Errorf("error setting the start time of step %q in taskrun %q: %v", s.Name, tr.Name, err)
					errs = append(errs, err)
				}
				exitCode, err := extractExitCodeFromResults(results)
				if err != nil {
					logger.Errorf("error extracting the exit code of step %q in taskrun %q: %v", s.Name, tr.Name, err)
					errs = append(errs, err)
				}
				taskResults, stepRunRes, filteredResults := filterResults(results, specResults, stepResults)
				if tr.IsDone() {
					taskRunStepResults = append(taskRunStepResults, stepRunRes...)
					// Set TaskResults from StepResults
					taskResults = append(taskResults, createTaskResultsFromStepResults(stepRunRes, neededStepResults)...)
					trs.Results = append(trs.Results, taskResults...)
					var tras v1.Artifacts
					err := setTaskRunArtifactsFromRunResult(filteredResults, &tras)
					if err != nil {
						logger.Errorf("error setting step artifacts in taskrun %q: %v", tr.Name, err)
						errs = append(errs, err)
					}
					trs.Artifacts.Merge(&tras)
					trs.Artifacts.Merge(&sas)
				}
				// Rewrite the termination message so internal entries are dropped.
				msg, err = createMessageFromResults(filteredResults)
				if err != nil {
					logger.Errorf("%v", err)
					errs = append(errs, err)
				} else {
					state.Terminated.Message = msg
				}
				if startedAt != nil {
					state.Terminated.StartedAt = *startedAt
				}
				if exitCode != nil {
					state.Terminated.ExitCode = *exitCode
				}
				terminationFromResults := extractTerminationReasonFromResults(results)
				terminationReason = getTerminationReason(state.Terminated.Reason, terminationFromResults, exitCode)
			}
		}
		stepState := v1.StepState{
			ContainerState:    *state.DeepCopy(),
			Name:              TrimStepPrefix(s.Name),
			Container:         s.Name,
			ImageID:           s.ImageID,
			Results:           taskRunStepResults,
			TerminationReason: terminationReason,
			Inputs:            sas.Inputs,
			Outputs:           sas.Outputs,
		}
		if stepStateProvenance, exist := stepStateProvenances[stepState.Name]; exist {
			stepState.Provenance = stepStateProvenance
		}
		orderedStepStates[i] = stepState
	}
	if len(orderedStepStates) > 0 {
		trs.Steps = orderedStepStates
	}
	return errors.Join(errs...)
}
// setStepArtifactsValueFromSidecarLogResult unmarshals the step-artifacts
// sidecar-log result whose key matches the given container name into artifacts.
func setStepArtifactsValueFromSidecarLogResult(results []result.RunResult, name string, artifacts *v1.Artifacts) error {
	for _, rr := range results {
		if rr.ResultType != result.StepArtifactsResultType || rr.Key != name {
			continue
		}
		return json.Unmarshal([]byte(rr.Value), artifacts)
	}
	return nil
}
// setStepArtifactsValueFromTerminationMessageRunResult unmarshals the first
// step-artifacts result found in a termination message into artifacts.
func setStepArtifactsValueFromTerminationMessageRunResult(results []result.RunResult, artifacts *v1.Artifacts) error {
	for _, rr := range results {
		if rr.ResultType != result.StepArtifactsResultType {
			continue
		}
		return json.Unmarshal([]byte(rr.Value), artifacts)
	}
	return nil
}
// setTaskRunStatusBasedOnSidecarStatus translates sidecar container statuses
// into SidecarState entries appended to the TaskRun status.
func setTaskRunStatusBasedOnSidecarStatus(sidecarStatuses []corev1.ContainerStatus, trs *v1.TaskRunStatus) {
	for _, cs := range sidecarStatuses {
		sidecarState := v1.SidecarState{
			ContainerState: *cs.State.DeepCopy(),
			Name:           TrimSidecarPrefix(cs.Name),
			Container:      cs.Name,
			ImageID:        cs.ImageID,
		}
		trs.Sidecars = append(trs.Sidecars, sidecarState)
	}
}
// createMessageFromResults marshals results back into a termination-message
// string. An empty result slice yields an empty message.
func createMessageFromResults(results []result.RunResult) (string, error) {
	if len(results) == 0 {
		return "", nil
	}
	encoded, err := json.Marshal(results)
	if err != nil {
		return "", fmt.Errorf("error marshalling remaining results back into termination message: %w", err)
	}
	return string(encoded), nil
}
// findStepResultsFetchedByTask fetches step results that the Task needs.
// It accepts a container name and the TaskResults as input and outputs
// a map with the name of the step result as the key and the name of the task result that is fetching it as value.
func findStepResultsFetchedByTask(containerName string, specResults []v1.TaskResult) (map[string]string, error) {
	neededStepResults := map[string]string{}
	for _, taskResult := range specResults {
		if taskResult.Value == nil || taskResult.Value.StringVal == "" {
			continue
		}
		sName, resultName, err := v1.ExtractStepResultName(taskResult.Value.StringVal)
		if err != nil {
			return nil, err
		}
		// Only look at named results - referencing unnamed steps is unsupported.
		if GetContainerName(sName) == containerName {
			neededStepResults[resultName] = taskResult.Name
		}
	}
	return neededStepResults, nil
}
// filterResults filters the RunResults and TaskResults based on the results declared in the task spec.
// It returns a slice of any of the input results that are defined in the task spec, converted to TaskRunResults,
// a slice of the input results declared as step results, converted to TaskRunStepResults,
// and a slice of any of the RunResults that don't represent internal values
// (i.e. those that should not be hidden from the TaskRun status).
func filterResults(results []result.RunResult, specResults []v1.TaskResult, stepResults []v1.StepResult) ([]v1.TaskRunResult, []v1.TaskRunStepResult, []result.RunResult) {
	var taskResults []v1.TaskRunResult
	var taskRunStepResults []v1.TaskRunStepResult
	var filteredResults []result.RunResult
	// Index declared result types by name so each run result is decoded with
	// its declared type (plain string vs structured JSON value).
	declaredTaskTypes := make(map[string]v1.ResultsType)
	declaredStepTypes := make(map[string]v1.ResultsType)
	for _, sr := range specResults {
		declaredTaskTypes[sr.Name] = sr.Type
	}
	for _, sr := range stepResults {
		declaredStepTypes[sr.Name] = sr.Type
	}
	// decode converts a raw run result into a structured value using the
	// declared type for its key; ok is false when JSON decoding fails.
	decode := func(raw result.RunResult, declared map[string]v1.ResultsType) (v1.ResultValue, v1.ResultsType, bool) {
		if declared[raw.Key] == v1.ResultsTypeString {
			return *v1.NewStructuredValues(raw.Value), v1.ResultsTypeString, true
		}
		var value v1.ResultValue
		if err := value.UnmarshalJSON([]byte(raw.Value)); err != nil {
			return v1.ResultValue{}, "", false
		}
		return value, v1.ResultsType(value.Type), true
	}
	for _, rr := range results {
		switch rr.ResultType {
		case result.TaskRunResultType:
			value, valueType, ok := decode(rr, declaredTaskTypes)
			if !ok {
				continue
			}
			taskResults = append(taskResults, v1.TaskRunResult{
				Name:  rr.Key,
				Type:  valueType,
				Value: value,
			})
			filteredResults = append(filteredResults, rr)
		case result.StepResultType:
			value, valueType, ok := decode(rr, declaredStepTypes)
			if !ok {
				continue
			}
			taskRunStepResults = append(taskRunStepResults, v1.TaskRunStepResult{
				Name:  rr.Key,
				Type:  valueType,
				Value: value,
			})
			filteredResults = append(filteredResults, rr)
		case result.StepArtifactsResultType, result.TaskRunArtifactsResultType:
			filteredResults = append(filteredResults, rr)
		case result.InternalTektonResultType:
			// Internal messages are ignored because they're not used as external result
		default:
			filteredResults = append(filteredResults, rr)
		}
	}
	return taskResults, taskRunStepResults, filteredResults
}
// removeDuplicateResults collapses results sharing a name: each name keeps
// its first occurrence's position but its last occurrence's value. An empty
// input yields nil.
func removeDuplicateResults(taskRunResult []v1.TaskRunResult) []v1.TaskRunResult {
	if len(taskRunResult) == 0 {
		return nil
	}
	indexByName := make(map[string]int, len(taskRunResult))
	uniq := make([]v1.TaskRunResult, 0, len(taskRunResult))
	for _, res := range taskRunResult {
		if i, seen := indexByName[res.Name]; seen {
			// Later value wins, but the original position is preserved.
			uniq[i] = res
			continue
		}
		indexByName[res.Name] = len(uniq)
		uniq = append(uniq, res)
	}
	return uniq
}
// extractStartedAtTimeFromResults returns the step start time recorded in the
// "StartedAt" run result, or (nil, nil) when no such result is present.
func extractStartedAtTimeFromResults(results []result.RunResult) (*metav1.Time, error) {
	for _, rr := range results {
		if rr.Key != "StartedAt" {
			continue
		}
		parsed, err := time.Parse(timeFormat, rr.Value)
		if err != nil {
			return nil, fmt.Errorf("could not parse time value %q in StartedAt field: %w", rr.Value, err)
		}
		startedAt := metav1.NewTime(parsed)
		return &startedAt, nil
	}
	return nil, nil //nolint:nilnil // would be more ergonomic to return a sentinel error
}
// extractExitCodeFromResults returns the exit code recorded in the "ExitCode"
// run result, or (nil, nil) when no such result is present.
func extractExitCodeFromResults(results []result.RunResult) (*int32, error) {
	for _, rr := range results {
		if rr.Key != "ExitCode" {
			continue
		}
		// We could just pass the string through but this provides extra validation
		parsed, err := strconv.ParseInt(rr.Value, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("could not parse int value %q in ExitCode field: %w", rr.Value, err)
		}
		exitCode := int32(parsed) // #nosec G115: ParseInt was called with bit size 32, so this is safe
		return &exitCode, nil
	}
	return nil, nil //nolint:nilnil // would be more ergonomic to return a sentinel error
}
// extractTerminationReasonFromResults returns the value of the internal
// "Reason" run result, or "" when none is present.
func extractTerminationReasonFromResults(results []result.RunResult) string {
	for _, rr := range results {
		if rr.ResultType == result.InternalTektonResultType && rr.Key == "Reason" {
			return rr.Value
		}
	}
	return ""
}
// getTerminationReason picks the most specific termination reason available:
// an explicit reason extracted from the results wins, then the presence of a
// recorded exit code maps to TerminationReasonContinued, and otherwise the
// container state's own reason is used.
func getTerminationReason(terminatedStateReason string, terminationFromResults string, exitCodeFromResults *int32) string {
	switch {
	case terminationFromResults != "":
		return terminationFromResults
	case exitCodeFromResults != nil:
		return TerminationReasonContinued
	default:
		return terminatedStateReason
	}
}
// updateCompletedTaskRunStatus marks the TaskRun succeeded or failed based on
// the pod outcome and stamps the completion time. When onError is "continue",
// a failure is recorded as ignored rather than fatal.
func updateCompletedTaskRunStatus(logger *zap.SugaredLogger, trs *v1.TaskRunStatus, pod *corev1.Pod, onError v1.PipelineTaskOnErrorType) {
	if !DidTaskRunFail(pod) {
		markStatusSuccess(trs)
	} else {
		msg := getFailureMessage(logger, pod)
		reason := v1.TaskRunReasonFailed.String()
		if onError == v1.PipelineTaskContinue {
			reason = v1.TaskRunReasonFailureIgnored.String()
		}
		markStatusFailure(trs, reason, msg)
	}
	// update tr completed time
	trs.CompletionTime = &metav1.Time{Time: time.Now()}
}
// updateIncompleteTaskRunStatus updates the status of a TaskRun whose pod has
// not finished yet. Running pods refresh the running condition; pending pods
// are inspected, in priority order, for recoverable vs fatal wait reasons.
func updateIncompleteTaskRunStatus(trs *v1.TaskRunStatus, pod *corev1.Pod) {
	switch pod.Status.Phase {
	case corev1.PodRunning:
		markStatusRunning(trs, v1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
	case corev1.PodPending:
		// Order matters: node-resource pressure and subPath errors are
		// treated as transient before the fatal config-error check.
		if IsPodExceedingNodeResources(pod) {
			markStatusRunning(trs, ReasonExceededNodeResources, "TaskRun Pod exceeded available resources")
		} else if isSubPathDirectoryError(pod) {
			// if subPath directory creation errors, mark as running and wait for recovery
			markStatusRunning(trs, ReasonPodPending, "Waiting for subPath directory creation to complete")
		} else if isPodHitConfigError(pod) {
			markStatusFailure(trs, ReasonCreateContainerConfigError, "Failed to create pod due to config error")
		} else if isPullImageError(pod) {
			markStatusRunning(trs, ReasonPullImageFailed, getWaitingMessage(pod))
		} else {
			markStatusRunning(trs, ReasonPodPending, getWaitingMessage(pod))
		}
	case corev1.PodSucceeded, corev1.PodFailed, corev1.PodUnknown:
		// Do nothing; pod has completed or is in an unknown state.
	}
}
// isPodCompleted checks if the given pod is completed.
// A pod is considered completed if its phase is either "Succeeded" or "Failed".
//
// If it is foreseeable that the pod will eventually be in a failed state,
// but it remains in a Running status for a visible period of time, it should be considered completed in advance.
//
// For example, when certain steps encounter OOM, only the pods that have timed out will change to a failed state,
// we should consider them completed in advance.
func isPodCompleted(pod *corev1.Pod) bool {
	switch pod.Status.Phase {
	case corev1.PodSucceeded, corev1.PodFailed:
		return true
	}
	for _, cs := range pod.Status.ContainerStatuses {
		if !IsContainerStep(cs.Name) {
			continue
		}
		if cs.State.Terminated != nil && isOOMKilled(cs) {
			return true
		}
	}
	return false
}
// DidTaskRunFail check the status of pod to decide if related taskrun is failed
func DidTaskRunFail(pod *corev1.Pod) bool {
	if pod.Status.Phase == corev1.PodFailed {
		return true
	}
	for _, cs := range pod.Status.ContainerStatuses {
		if !IsContainerStep(cs.Name) {
			continue
		}
		term := cs.State.Terminated
		if term == nil {
			continue
		}
		if term.ExitCode != 0 || isOOMKilled(cs) {
			return true
		}
	}
	return false
}
// IsPodArchived indicates if a pod is archived in the retriesStatus.
func IsPodArchived(pod *corev1.Pod, trs *v1.TaskRunStatus) bool {
	podName := pod.GetName()
	for _, retryStatus := range trs.RetriesStatus {
		if retryStatus.PodName == podName {
			return true
		}
	}
	return false
}
// containerNameFilter is a predicate over container names.
type containerNameFilter func(name string) bool

// isMatchingAnyFilter returns true if the container name matches any of the filters.
func isMatchingAnyFilter(name string, filters []containerNameFilter) bool {
	for _, matches := range filters {
		if matches(name) {
			return true
		}
	}
	return false
}
// areInitContainersCompleted returns true if all init containers in the pod are completed.
// The ctx parameter is currently unused but kept for signature parity with
// areContainersCompleted.
func areInitContainersCompleted(ctx context.Context, pod *corev1.Pod) bool {
	statuses := pod.Status.InitContainerStatuses
	if len(statuses) == 0 {
		return false
	}
	if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded {
		return false
	}
	for _, cs := range statuses {
		// if any init container is not completed, return false
		if cs.State.Terminated == nil {
			return false
		}
	}
	return true
}
// areContainersCompleted returns true if all related containers in the pod are completed.
func areContainersCompleted(ctx context.Context, pod *corev1.Pod) bool {
	nameFilters := []containerNameFilter{IsContainerStep}
	if config.FromContextOrDefaults(ctx).FeatureFlags.ResultExtractionMethod == config.ResultExtractionMethodSidecarLogs {
		// When results are read from sidecar logs, also wait for the results
		// sidecar so the final result isn't fetched before it finishes.
		isResultsSidecar := func(name string) bool {
			return name == pipeline.ReservedResultsSidecarContainerName
		}
		nameFilters = append(nameFilters, isResultsSidecar)
	}
	return checkContainersCompleted(pod, nameFilters)
}
// checkContainersCompleted returns true if containers in the pod are completed.
// Only containers whose names match one of nameFilters are considered.
func checkContainersCompleted(pod *corev1.Pod, nameFilters []containerNameFilter) bool {
	if len(pod.Status.ContainerStatuses) == 0 {
		return false
	}
	if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded {
		return false
	}
	for _, cs := range pod.Status.ContainerStatuses {
		if !isMatchingAnyFilter(cs.Name, nameFilters) {
			continue
		}
		// if any relevant container is not completed, return false
		if cs.State.Terminated == nil {
			return false
		}
	}
	return true
}
// getFailureMessage returns a human-readable explanation of why the pod
// failed, checking (in order) pod eviction, init containers, step containers,
// the pod's own status message, and step OOM kills, before falling back to a
// generic message.
func getFailureMessage(logger *zap.SugaredLogger, pod *corev1.Pod) string {
	// If a pod was evicted, use the pods status message before trying to
	// determine a failure message from the pod's container statuses. A
	// container may have a generic exit code that contains less information,
	// such as an exit code and message related to not being located.
	if pod.Status.Reason == evicted {
		return pod.Status.Message
	}
	// First, try to surface an error about the actual init container that failed.
	for _, status := range pod.Status.InitContainerStatuses {
		if msg := extractContainerFailureMessage(logger, status, pod.ObjectMeta); msg != "" {
			return "init container failed, " + msg
		}
	}
	// Next, try to surface an error about the actual build step that failed.
	for _, status := range pod.Status.ContainerStatuses {
		if msg := extractContainerFailureMessage(logger, status, pod.ObjectMeta); msg != "" {
			return msg
		}
	}
	// Next, return the Pod's status message if it has one.
	if msg := pod.Status.Message; msg != "" {
		return msg
	}
	// Then check whether any step container was OOM-killed.
	for _, cs := range pod.Status.ContainerStatuses {
		if IsContainerStep(cs.Name) && cs.State.Terminated != nil && isOOMKilled(cs) {
			return oomKilled
		}
	}
	// Lastly fall back on a generic error message.
	return "build failed for unspecified reasons."
}
// extractContainerFailureMessage returns the container failure message by container status or init container status.
// It returns "" when the container has not terminated or terminated with exit code 0.
func extractContainerFailureMessage(logger *zap.SugaredLogger, status corev1.ContainerStatus, podMetaData metav1.ObjectMeta) string {
	term := status.State.Terminated
	if term == nil {
		return ""
	}
	// A step that exceeded its timeout records an internal "Reason" result in
	// the termination message; prefer that explanation over the raw exit code.
	parsed, _ := termination.ParseMessage(logger, term.Message)
	for _, runResult := range parsed {
		if runResult.ResultType == result.InternalTektonResultType && runResult.Key == "Reason" && runResult.Value == TerminationReasonTimeoutExceeded {
			return fmt.Sprintf("%q exited because the step exceeded the specified timeout limit", status.Name)
		}
	}
	if term.ExitCode == 0 {
		return ""
	}
	// Include the termination reason, if available to add clarity for causes such as external signals, e.g. OOM
	if term.Reason != "" {
		return fmt.Sprintf("%q exited with code %d: %s", status.Name, term.ExitCode, term.Reason)
	}
	return fmt.Sprintf("%q exited with code %d", status.Name, term.ExitCode)
}
// IsPodExceedingNodeResources returns true if the Pod's status indicates
// there are insufficient resources to schedule the Pod.
func IsPodExceedingNodeResources(pod *corev1.Pod) bool {
	for i := range pod.Status.Conditions {
		cond := &pod.Status.Conditions[i]
		// The kubelet reports "Insufficient cpu/memory/..." in the message
		// of an Unschedulable condition.
		if cond.Reason == corev1.PodReasonUnschedulable && strings.Contains(cond.Message, "Insufficient") {
			return true
		}
	}
	return false
}
// isPodHitConfigError returns true if the Pod's status indicates a container
// config error was raised. SubPath-directory creation failures are excluded
// because they are treated as recoverable.
func isPodHitConfigError(pod *corev1.Pod) bool {
	for _, cs := range pod.Status.ContainerStatuses {
		waiting := cs.State.Waiting
		if waiting == nil || waiting.Reason != ReasonCreateContainerConfigError {
			continue
		}
		// For subPath directory creation errors, we want to allow recovery.
		return !strings.Contains(waiting.Message, "failed to create subPath directory")
	}
	return false
}
// isPullImageError returns true if the Pod's status indicates that any
// container failed while pulling its image.
func isPullImageError(pod *corev1.Pod) bool {
	for _, cs := range pod.Status.ContainerStatuses {
		if w := cs.State.Waiting; w != nil && isImageErrorReason(w.Reason) {
			return true
		}
	}
	return false
}
// isImageErrorReason reports whether the given container waiting reason is
// one of the kubelet's image-pull error reasons.
// Reference from https://github.com/kubernetes/kubernetes/blob/a1c8e9386af844757333733714fa1757489735b3/pkg/kubelet/images/types.go#L26
func isImageErrorReason(reason string) bool {
	// A switch is the idiomatic membership test for a small fixed set and
	// avoids allocating and scanning a slice on every call.
	switch reason {
	case "ImagePullBackOff",
		"ImageInspectError",
		"ErrImagePull",
		"ErrImageNeverPull",
		"RegistryUnavailable",
		"InvalidImageName":
		return true
	}
	return false
}
// getWaitingMessage builds a human-readable message explaining why the pod is
// waiting, preferring container-level detail over pod-level conditions.
func getWaitingMessage(pod *corev1.Pod) string {
	// Prefer the reason reported by a waiting build step, when available.
	for _, cs := range pod.Status.ContainerStatuses {
		if w := cs.State.Waiting; w != nil && w.Message != "" {
			return fmt.Sprintf("build step %q is pending with reason %q",
				cs.Name, w.Message)
		}
	}
	// Otherwise surface the first pod condition whose status is not "True".
	for _, cond := range pod.Status.Conditions {
		if cond.Status != corev1.ConditionTrue {
			return fmt.Sprintf("pod status %q:%q; message: %q",
				cond.Type, cond.Status, cond.Message)
		}
	}
	// Next, return the Pod's status message if it has one.
	if pod.Status.Message != "" {
		return pod.Status.Message
	}
	// Lastly fall back on a generic pending message.
	return "Pending"
}
// markStatusRunning sets the TaskRun's Succeeded condition to Unknown,
// signalling that the TaskRun is still in progress. reason and message
// describe the current stage of execution.
func markStatusRunning(trs *v1.TaskRunStatus, reason, message string) {
	trs.SetCondition(&apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionUnknown,
		Reason:  reason,
		Message: message,
	})
}
// markStatusFailure sets the TaskRun's Succeeded condition to False with the
// specified reason and message, marking the TaskRun as failed.
func markStatusFailure(trs *v1.TaskRunStatus, reason string, message string) {
	trs.SetCondition(&apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionFalse,
		Reason:  reason,
		Message: message,
	})
}
// markStatusSuccess sets the TaskRun's Succeeded condition to True with the
// standard "Succeeded" reason, marking the TaskRun as completed successfully.
func markStatusSuccess(trs *v1.TaskRunStatus) {
	trs.SetCondition(&apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionTrue,
		Reason:  v1.TaskRunReasonSuccessful.String(),
		Message: "All Steps have completed executing",
	})
}
// sortPodContainerStatuses reorders a pod's container statuses in place so
// that they're in the same order as the step containers from the TaskSpec.
// NOTE(review): a status whose name has no matching spec container is
// overwritten, and a spec container with no matching status produces a
// zero-valued ContainerStatus at that index — callers appear to assume the
// two lists describe the same containers; confirm before reusing elsewhere.
func sortPodContainerStatuses(podContainerStatuses []corev1.ContainerStatus, podSpecContainers []corev1.Container) {
	// Index the existing statuses by container name for O(1) lookup.
	statuses := map[string]corev1.ContainerStatus{}
	for _, status := range podContainerStatuses {
		statuses[status.Name] = status
	}
	// Rewrite each slot to hold the status of the spec container at the
	// same position.
	for i, c := range podSpecContainers {
		// prevent out-of-bounds panic on incorrectly formed lists
		if i < len(podContainerStatuses) {
			podContainerStatuses[i] = statuses[c.Name]
		}
	}
}
// isOOMKilled reports whether the container was terminated by the OOM killer.
// It is nil-safe: a container that has not terminated cannot be OOM-killed.
// (The previous version dereferenced s.State.Terminated unconditionally and
// would panic for a non-terminated container.)
func isOOMKilled(s corev1.ContainerStatus) bool {
	term := s.State.Terminated
	return term != nil && term.Reason == oomKilled
}
// isSubPathDirectoryError returns true if any container is waiting on a
// CreateContainerConfigError caused by a failed subPath directory creation.
func isSubPathDirectoryError(pod *corev1.Pod) bool {
	for _, cs := range pod.Status.ContainerStatuses {
		w := cs.State.Waiting
		if w == nil {
			continue
		}
		if w.Reason == ReasonCreateContainerConfigError && strings.Contains(w.Message, "failed to create subPath directory") {
			return true
		}
	}
	return false
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"path/filepath"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// workingDirInit returns a Container that should be run as an init
// container to ensure that all steps' workingDirs relative to the workspace
// exist.
//
// If no such directories need to be created (i.e., no relative workingDirs
// are specified), this method returns nil, as no init container is necessary.
// If setSecurityContext is true, the init container will include a security
// context allowing it to run in namespaces with restricted pod security
// admission. If the init container will run on windows, `windows` should be
// set to `true`, so that the correct security context can be applied.
func workingDirInit(workingdirinitImage string, stepContainers []corev1.Container, securityContext SecurityContextConfig, windows bool) *corev1.Container {
	// Collect the unique, non-empty workingDirs across all steps.
	dirs := sets.NewString()
	for _, sc := range stepContainers {
		if wd := sc.WorkingDir; wd != "" {
			dirs.Insert(wd)
		}
	}
	if dirs.Len() == 0 {
		return nil
	}
	// Keep only the dirs the init container must create: relative paths,
	// plus absolute paths under /workspace/.
	var args []string
	for _, wd := range dirs.List() {
		cleaned := filepath.Clean(wd)
		if !filepath.IsAbs(cleaned) || strings.HasPrefix(cleaned, "/workspace/") {
			args = append(args, cleaned)
		}
	}
	if len(args) == 0 {
		// There are no workingDirs to initialize.
		return nil
	}
	c := &corev1.Container{
		Name:         "working-dir-initializer",
		Image:        workingdirinitImage,
		Command:      []string{"/ko-app/workingdirinit"},
		Args:         args,
		WorkingDir:   pipeline.WorkspaceDir,
		VolumeMounts: implicitVolumeMounts,
	}
	if securityContext.SetSecurityContext {
		c.SecurityContext = securityContext.GetSecurityContext(windows)
	}
	return c
}
package apiserver
import (
"context"
"errors"
"fmt"
"github.com/google/uuid"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
var (
	// ErrReferencedObjectValidationFailed is returned when a validating
	// webhook rejects the referenced object (the dry-run create came back
	// as a bad request).
	ErrReferencedObjectValidationFailed = errors.New("validation failed for referenced object")
	// ErrCouldntValidateObjectRetryable is returned for transient failures
	// (timeouts, throttling, unknown errors) where validation may succeed
	// if retried.
	ErrCouldntValidateObjectRetryable = errors.New("retryable error validating referenced object")
	// ErrCouldntValidateObjectPermanent is returned for failures that will
	// not be fixed by retrying (e.g. an invalid or unsupported object).
	ErrCouldntValidateObjectPermanent = errors.New("permanent error validating referenced object")
)
// DryRunValidate validates the obj by issuing a dry-run create request for it
// in the given namespace. This allows validating admission webhooks to
// process the object without actually creating it.
// obj must be a v1/v1beta1 Task or Pipeline, or a v1alpha1/v1beta1
// StepAction; any other type returns an error. On success it returns the
// (possibly webhook-mutated) object from the dry-run response.
func DryRunValidate(ctx context.Context, namespace string, obj runtime.Object, tekton clientset.Interface) (runtime.Object, error) {
	// Use a randomized name for the Pipeline/Task in case there is already
	// another Pipeline/Task of the same name.
	dryRunObjName := uuid.NewString()
	switch obj := obj.(type) {
	case *v1.Pipeline:
		dryRunObj := obj.DeepCopy()
		dryRunObj.Name = dryRunObjName
		dryRunObj.Namespace = namespace // Make sure the namespace is the same as the PipelineRun
		mutatedObj, err := tekton.TektonV1().Pipelines(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
		if err != nil {
			return nil, handleDryRunCreateErr(err, obj.Name)
		}
		return mutatedObj, nil
	case *v1beta1.Pipeline:
		dryRunObj := obj.DeepCopy()
		dryRunObj.Name = dryRunObjName
		dryRunObj.Namespace = namespace // Make sure the namespace is the same as the PipelineRun
		mutatedObj, err := tekton.TektonV1beta1().Pipelines(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
		if err != nil {
			return nil, handleDryRunCreateErr(err, obj.Name)
		}
		return mutatedObj, nil
	case *v1.Task:
		dryRunObj := obj.DeepCopy()
		dryRunObj.Name = dryRunObjName
		dryRunObj.Namespace = namespace // Make sure the namespace is the same as the TaskRun
		mutatedObj, err := tekton.TektonV1().Tasks(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
		if err != nil {
			return nil, handleDryRunCreateErr(err, obj.Name)
		}
		return mutatedObj, nil
	case *v1beta1.Task:
		dryRunObj := obj.DeepCopy()
		dryRunObj.Name = dryRunObjName
		dryRunObj.Namespace = namespace // Make sure the namespace is the same as the TaskRun
		mutatedObj, err := tekton.TektonV1beta1().Tasks(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
		if err != nil {
			return nil, handleDryRunCreateErr(err, obj.Name)
		}
		return mutatedObj, nil
	case *v1alpha1.StepAction:
		dryRunObj := obj.DeepCopy()
		dryRunObj.Name = dryRunObjName
		dryRunObj.Namespace = namespace // Make sure the namespace is the same as the StepAction
		mutatedObj, err := tekton.TektonV1alpha1().StepActions(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
		if err != nil {
			return nil, handleDryRunCreateErr(err, obj.Name)
		}
		return mutatedObj, nil
	case *v1beta1.StepAction:
		dryRunObj := obj.DeepCopy()
		dryRunObj.Name = dryRunObjName
		dryRunObj.Namespace = namespace // Make sure the namespace is the same as the StepAction
		mutatedObj, err := tekton.TektonV1beta1().StepActions(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
		if err != nil {
			return nil, handleDryRunCreateErr(err, obj.Name)
		}
		return mutatedObj, nil
	default:
		return nil, fmt.Errorf("unsupported object GVK %s", obj.GetObjectKind().GroupVersionKind())
	}
}
// handleDryRunCreateErr maps an error returned by a dry-run create call onto
// one of the package's sentinel errors, wrapping both the sentinel and the
// original error so callers can match either with errors.Is.
func handleDryRunCreateErr(err error, objectName string) error {
	var sentinel error
	switch {
	case apierrors.IsBadRequest(err):
		// The object was rejected by a validating webhook.
		sentinel = ErrReferencedObjectValidationFailed
	case apierrors.IsInvalid(err), apierrors.IsMethodNotSupported(err):
		// These will not succeed on retry.
		sentinel = pipelineErrors.WrapUserError(ErrCouldntValidateObjectPermanent)
	default:
		// Timeouts, server timeouts, throttling, and any unrecognized
		// errors are all treated as retryable; additional cases can be
		// added above as needed.
		sentinel = ErrCouldntValidateObjectRetryable
	}
	return fmt.Errorf("%w %s: %w", sentinel, objectName, err)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"encoding/json"
"errors"
"fmt"
cloudevents "github.com/cloudevents/sdk-go/v2"
lru "github.com/hashicorp/golang-lru"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)
// eventData is the shape used to unmarshal a cloud event's JSON payload;
// only the customRun field is of interest for cache keying.
type eventData struct {
	CustomRun *v1beta1.CustomRun `json:"customRun,omitempty"`
}
// ContainsOrAddCloudEvent checks whether the event is already present in the
// cache, adding it if absent. It returns true when the event had been seen
// before, and an error when the cache is nil or the event key cannot be built.
func ContainsOrAddCloudEvent(cacheClient *lru.Cache, event *cloudevents.Event) (bool, error) {
	if cacheClient == nil {
		return false, errors.New("cache client is nil")
	}
	key, err := EventKey(event)
	if err != nil {
		return false, err
	}
	// Only the key matters; the stored value is unused.
	seen, _ := cacheClient.ContainsOrAdd(key, nil)
	return seen, nil
}
// EventKey defines whether an event is considered different from another;
// in future we might want to let specific event types override this.
// The key has the shape <eventType>/customrun/<namespace>/<name>.
func EventKey(event *cloudevents.Event) (string, error) {
	var data eventData
	if err := json.Unmarshal(event.Data(), &data); err != nil {
		return "", err
	}
	if data.CustomRun == nil {
		return "", fmt.Errorf("invalid CustomRun data in %v", event)
	}
	run := data.CustomRun
	return fmt.Sprintf("%s/%s/%s/%s", event.Type(), "customrun", run.Namespace, run.Name), nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
lru "github.com/hashicorp/golang-lru"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
// With 4 events per Run, we can store events for 1024 concurrent Runs
const bufferSize = 4096
// init registers the cache client with the default injection framework so a
// cache is attached to the context at controller startup.
func init() {
	injection.Default.RegisterClient(withCacheClient)
}
// cacheKey is a way to associate the Cache from inside the context.Context
type cacheKey struct{}
// withCacheClientFromSize stores a new LRU cache of the given size in the
// returned context. On cache-creation failure the error is logged and a nil
// cache is stored; consumers (Get) must tolerate a nil client.
func withCacheClientFromSize(ctx context.Context, size int) context.Context {
	logger := logging.FromContext(ctx)
	cacheClient, err := lru.New(size)
	// Check the error before touching the client, and keep the creation
	// diagnostic at debug level rather than shouting at info.
	if err != nil {
		logger.Error("unable to create cacheClient :" + err.Error())
	}
	logger.Debugf("CACHE CLIENT %+v", cacheClient)
	return ToContext(ctx, cacheClient)
}
// withCacheClient attaches a cache of the default bufferSize to the context.
// The rest.Config is unused; the parameter exists only to satisfy the
// injection RegisterClient signature.
func withCacheClient(ctx context.Context, cfg *rest.Config) context.Context {
	return withCacheClientFromSize(ctx, bufferSize)
}
// Get extracts the LRU cache client from the context. It returns nil (after
// logging an error) when no cache has been attached or the stored value has
// an unexpected type. (The doc previously referred to the cloud events
// client; this context key holds the cache.)
func Get(ctx context.Context) *lru.Cache {
	untyped := ctx.Value(cacheKey{})
	if untyped == nil {
		logging.FromContext(ctx).Errorf("Unable to fetch client from context.")
		return nil
	}
	// Comma-ok assertion: a mistyped value previously caused a panic here.
	client, ok := untyped.(*lru.Cache)
	if !ok {
		logging.FromContext(ctx).Errorf("Unable to fetch client from context.")
		return nil
	}
	return client
}
// ToContext stores the given LRU cache client in the returned context under
// the package's private key; retrieve it with Get. (Despite the previous
// comment, this stores the events cache, not the cloud events client.)
func ToContext(ctx context.Context, c *lru.Cache) context.Context {
	return context.WithValue(ctx, cacheKey{}, c)
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
)
const fakeBufferSize = 128
// init registers the fake cache client with the fake injection framework for
// use in tests.
func init() {
	injection.Fake.RegisterClient(withFakeCacheClient)
}
// withFakeCacheClient attaches a small cache (fakeBufferSize entries) to the
// context for tests. The rest.Config is unused; the parameter exists only to
// satisfy the injection RegisterClient signature.
func withFakeCacheClient(ctx context.Context, cfg *rest.Config) context.Context {
	return withCacheClientFromSize(ctx, fakeBufferSize)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudevent
import (
"context"
"errors"
"time"
cloudevents "github.com/cloudevents/sdk-go/v2"
lru "github.com/hashicorp/golang-lru"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/reconciler/events/cache"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/runtime"
"knative.dev/pkg/apis"
controller "knative.dev/pkg/controller"
"knative.dev/pkg/logging"
)
// cloudEventsSink returns the URL cloud events should be sent to, or "" when
// no sink is configured.
func cloudEventsSink(ctx context.Context) string {
	configs := config.FromContextOrDefaults(ctx)
	// Try the sink configuration first.
	sink := configs.Events.Sink
	if sink == "" {
		// Fall back to the deprecated flag if the new one is not set.
		// This ensures no changes in behaviour for existing users of the
		// deprecated flag.
		sink = configs.Defaults.DefaultCloudEventsSink
	}
	return sink
}
// EmitCloudEvents emits CloudEvents (only) for object. It is a no-op when no
// sink is configured; send failures are logged as warnings, not returned.
func EmitCloudEvents(ctx context.Context, object runtime.Object) {
	sink := cloudEventsSink(ctx)
	if sink == "" {
		// No sink configured; nothing to emit.
		return
	}
	ctx = cloudevents.ContextWithTarget(ctx, sink)
	if err := SendCloudEventWithRetries(ctx, object); err != nil {
		logging.FromContext(ctx).Warnf("Failed to emit cloud events %v", err.Error())
	}
}
// EmitCloudEventsWhenConditionChange emits CloudEvents when there is a change
// in condition. It is a no-op when no sink is configured or the condition is
// unchanged; send failures are logged as warnings, not returned.
func EmitCloudEventsWhenConditionChange(ctx context.Context, beforeCondition *apis.Condition, afterCondition *apis.Condition, object runtime.Object) {
	sink := cloudEventsSink(ctx)
	if sink == "" {
		return
	}
	// Only send events if the new condition represents a change.
	if equality.Semantic.DeepEqual(beforeCondition, afterCondition) {
		return
	}
	ctx = cloudevents.ContextWithTarget(ctx, sink)
	if err := SendCloudEventWithRetries(ctx, object); err != nil {
		logging.FromContext(ctx).Warnf("Failed to emit cloud events %v", err.Error())
	}
}
// SendCloudEventWithRetries sends a cloud event for the specified resource.
// It does not block and it performs retries with backoff using the
// cloudevents sdk-go capabilities.
// It accepts a runtime.Object to avoid making objectWithCondition public
// since it's only used within the events/cloudevents packages.
func SendCloudEventWithRetries(ctx context.Context, object runtime.Object) error {
	var (
		o           objectWithCondition
		ok          bool
		cacheClient *lru.Cache
	)
	if o, ok = object.(objectWithCondition); !ok {
		return errors.New("input object does not satisfy objectWithCondition")
	}
	logger := logging.FromContext(ctx)
	ceClient := Get(ctx)
	if ceClient == nil {
		return errors.New("no cloud events client found in the context")
	}
	event, err := EventForObjectWithCondition(ctx, o)
	if err != nil {
		return err
	}
	// Events for CustomRuns require a cache of events that have been sent,
	// to avoid sending the same event twice.
	_, isCustomRun := object.(*v1beta1.CustomRun)
	if isCustomRun {
		cacheClient = cache.Get(ctx)
	}
	// wasIn guarantees the goroutine below has started (and the in-flight
	// count has been bumped) before this function returns.
	wasIn := make(chan error)
	ceClient.addCount()
	go func() {
		defer ceClient.decreaseCount()
		wasIn <- nil
		logger.Debugf("Sending cloudevent of type %q", event.Type())
		// In case of a CustomRun event, check the cache to see whether this
		// cloudevent was already sent; a cache error is logged but treated
		// as "not sent".
		if isCustomRun {
			cloudEventSent, err := cache.ContainsOrAddCloudEvent(cacheClient, event)
			if err != nil {
				logger.Errorf("Error while checking cache: %s", err)
			}
			if cloudEventSent {
				logger.Infof("cloudevent %v already sent", event)
				return
			}
		}
		// Send with exponential backoff (10ms base, 10 retries). On failure,
		// surface the problem as a Kubernetes warning Event on the object,
		// when an event recorder is available.
		if result := ceClient.Send(cloudevents.ContextWithRetriesExponentialBackoff(ctx, 10*time.Millisecond, 10), *event); !cloudevents.IsACK(result) {
			logger.Warnf("Failed to send cloudevent: %s", result.Error())
			recorder := controller.GetEventRecorder(ctx)
			if recorder == nil {
				logger.Warnf("No recorder in context, cannot emit error event")
				return
			}
			recorder.Event(object, corev1.EventTypeWarning, "Cloud Event Failure", result.Error())
		}
	}()
	return <-wasIn
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudevent
import (
"context"
"errors"
"fmt"
"strings"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"knative.dev/pkg/apis"
)
// TektonEventType holds the types of cloud events sent by Tekton
type TektonEventType string
const (
// TaskRunStartedEventV1 is sent for TaskRuns with "ConditionSucceeded" "Unknown"
// the first time they are picked up by the reconciler
TaskRunStartedEventV1 TektonEventType = "dev.tekton.event.taskrun.started.v1"
// TaskRunRunningEventV1 is sent for TaskRuns with "ConditionSucceeded" "Unknown"
// once the TaskRun is validated and Pod created
TaskRunRunningEventV1 TektonEventType = "dev.tekton.event.taskrun.running.v1"
// TaskRunUnknownEventV1 is sent for TaskRuns with "ConditionSucceeded" "Unknown"
// It can be used as a confirmation that the TaskRun is still running.
TaskRunUnknownEventV1 TektonEventType = "dev.tekton.event.taskrun.unknown.v1"
// TaskRunSuccessfulEventV1 is sent for TaskRuns with "ConditionSucceeded" "True"
TaskRunSuccessfulEventV1 TektonEventType = "dev.tekton.event.taskrun.successful.v1"
// TaskRunFailedEventV1 is sent for TaskRuns with "ConditionSucceeded" "False"
TaskRunFailedEventV1 TektonEventType = "dev.tekton.event.taskrun.failed.v1"
// PipelineRunStartedEventV1 is sent for PipelineRuns with "ConditionSucceeded" "Unknown"
// the first time they are picked up by the reconciler
PipelineRunStartedEventV1 TektonEventType = "dev.tekton.event.pipelinerun.started.v1"
// PipelineRunRunningEventV1 is sent for PipelineRuns with "ConditionSucceeded" "Unknown"
// once the PipelineRun is validated and Pod created
PipelineRunRunningEventV1 TektonEventType = "dev.tekton.event.pipelinerun.running.v1"
// PipelineRunUnknownEventV1 is sent for PipelineRuns with "ConditionSucceeded" "Unknown"
// It can be used as a confirmation that the PipelineRun is still running.
PipelineRunUnknownEventV1 TektonEventType = "dev.tekton.event.pipelinerun.unknown.v1"
// PipelineRunSuccessfulEventV1 is sent for PipelineRuns with "ConditionSucceeded" "True"
PipelineRunSuccessfulEventV1 TektonEventType = "dev.tekton.event.pipelinerun.successful.v1"
// PipelineRunFailedEventV1 is sent for PipelineRuns with "ConditionSucceeded" "False"
PipelineRunFailedEventV1 TektonEventType = "dev.tekton.event.pipelinerun.failed.v1"
// CustomRunStartedEventV1 is sent for CustomRuns with "ConditionSucceeded" "Unknown"
// the first time they are picked up by the reconciler
CustomRunStartedEventV1 TektonEventType = "dev.tekton.event.customrun.started.v1"
// CustomRunRunningEventV1 is sent for CustomRuns with "ConditionSucceeded" "Unknown"
// once the CustomRun is validated and Pod created
CustomRunRunningEventV1 TektonEventType = "dev.tekton.event.customrun.running.v1"
// CustomRunSuccessfulEventV1 is sent for CustomRuns with "ConditionSucceeded" "True"
CustomRunSuccessfulEventV1 TektonEventType = "dev.tekton.event.customrun.successful.v1"
// CustomRunFailedEventV1 is sent for CustomRuns with "ConditionSucceeded" "False"
CustomRunFailedEventV1 TektonEventType = "dev.tekton.event.customrun.failed.v1"
)
// String returns the event type as its underlying string value.
func (t TektonEventType) String() string {
	return string(t)
}
// CEClient wraps the `Client` interface from github.com/cloudevents/sdk-go/v2/cloudevents
// and has methods to count the cloud events being sent, those methods are for testing purposes.
type CEClient interface {
cloudevents.Client
// addCount increments the count of events to be sent
addCount()
// decreaseCount decrements the count of events to be sent, indicating the event has been sent
decreaseCount()
}
// TektonCloudEventData type is used to marshal and unmarshal the payload of
// a Tekton cloud event. It can include a TaskRun or a PipelineRun
type TektonCloudEventData struct {
TaskRun *v1beta1.TaskRun `json:"taskRun,omitempty"`
PipelineRun *v1beta1.PipelineRun `json:"pipelineRun,omitempty"`
CustomRun *v1beta1.CustomRun `json:"customRun,omitempty"`
}
// newTektonCloudEventData returns a new instance of TektonCloudEventData
// populated from the given run object. v1 TaskRuns and PipelineRuns are
// converted to v1beta1 for the payload; unrecognized types yield an empty
// payload without error.
func newTektonCloudEventData(ctx context.Context, runObject objectWithCondition) (TektonCloudEventData, error) {
	data := TektonCloudEventData{}
	switch run := runObject.(type) {
	case *v1beta1.TaskRun:
		data.TaskRun = run
	case *v1beta1.PipelineRun:
		data.PipelineRun = run
	case *v1.TaskRun:
		converted := &v1beta1.TaskRun{}
		if err := converted.ConvertFrom(ctx, run); err != nil {
			return TektonCloudEventData{}, err
		}
		data.TaskRun = converted
	case *v1.PipelineRun:
		converted := &v1beta1.PipelineRun{}
		if err := converted.ConvertFrom(ctx, run); err != nil {
			return TektonCloudEventData{}, err
		}
		data.PipelineRun = converted
	case *v1beta1.CustomRun:
		data.CustomRun = run
	}
	return data, nil
}
// EventForObjectWithCondition creates a new event for an objectWithCondition,
// or returns an error if not possible. The event's subject is the object
// name, its source is the object's API path, its type is derived from the
// object's ConditionSucceeded state, and its payload is the JSON-encoded
// TektonCloudEventData.
func EventForObjectWithCondition(ctx context.Context, runObject objectWithCondition) (*cloudevents.Event, error) {
	event := cloudevents.NewEvent()
	event.SetID(uuid.New().String())
	event.SetSubject(runObject.GetObjectMeta().GetName())
	// TODO: SelfLink is deprecated https://github.com/tektoncd/pipeline/issues/2676
	source := runObject.GetObjectMeta().GetSelfLink()
	if source == "" {
		// Fall back to a synthesized API path when SelfLink is not set.
		gvk := runObject.GetObjectKind().GroupVersionKind()
		source = fmt.Sprintf("/apis/%s/%s/namespaces/%s/%s/%s",
			gvk.Group,
			gvk.Version,
			runObject.GetObjectMeta().GetNamespace(),
			gvk.Kind,
			runObject.GetObjectMeta().GetName())
	}
	event.SetSource(source)
	eventType, err := getEventType(runObject)
	if err != nil {
		return nil, err
	}
	if eventType == nil {
		return nil, errors.New("no matching event type found")
	}
	event.SetType(eventType.String())
	tektonCloudEventData, err := newTektonCloudEventData(ctx, runObject)
	if err != nil {
		return nil, err
	}
	if err := event.SetData(cloudevents.ApplicationJSON, tektonCloudEventData); err != nil {
		return nil, err
	}
	return &event, nil
}
// getEventType maps the run object's concrete type and its ConditionSucceeded
// state to the Tekton cloud event type to emit. It returns an error when the
// condition is absent (except for CustomRuns, which are then considered just
// started) or when the condition status is unrecognized.
func getEventType(runObject objectWithCondition) (*TektonEventType, error) {
	var eventType TektonEventType
	c := runObject.GetStatusCondition().GetCondition(apis.ConditionSucceeded)
	if c == nil {
		// When the `Run` is created, it may not have any condition until it's
		// picked up by the `Run` reconciler. In that case we consider the run
		// as started. In all other cases, conditions have to be initialised
		switch runObject.(type) {
		case *v1beta1.CustomRun:
			eventType = CustomRunStartedEventV1
			return &eventType, nil
		default:
			return nil, fmt.Errorf("no condition for ConditionSucceeded in %T", runObject)
		}
	}
	switch {
	case c.IsUnknown():
		// Unknown means in progress; refine by the condition's reason.
		switch runObject.(type) {
		case *v1beta1.TaskRun:
			switch c.Reason {
			case v1beta1.TaskRunReasonStarted.String():
				eventType = TaskRunStartedEventV1
			case v1beta1.TaskRunReasonRunning.String():
				eventType = TaskRunRunningEventV1
			default:
				eventType = TaskRunUnknownEventV1
			}
		case *v1.TaskRun:
			switch c.Reason {
			case v1.TaskRunReasonStarted.String():
				eventType = TaskRunStartedEventV1
			case v1.TaskRunReasonRunning.String():
				eventType = TaskRunRunningEventV1
			default:
				eventType = TaskRunUnknownEventV1
			}
		case *v1beta1.PipelineRun:
			switch c.Reason {
			case v1beta1.PipelineRunReasonStarted.String():
				eventType = PipelineRunStartedEventV1
			case v1beta1.PipelineRunReasonRunning.String():
				eventType = PipelineRunRunningEventV1
			default:
				eventType = PipelineRunUnknownEventV1
			}
		case *v1.PipelineRun:
			switch c.Reason {
			case v1.PipelineRunReasonStarted.String():
				eventType = PipelineRunStartedEventV1
			case v1.PipelineRunReasonRunning.String():
				eventType = PipelineRunRunningEventV1
			default:
				eventType = PipelineRunUnknownEventV1
			}
		case *v1beta1.CustomRun:
			// CustomRun controller have the freedom of setting reasons as they wish
			// so we cannot make many assumptions here. If a condition is set
			// to unknown (not finished), we sent the running event
			eventType = CustomRunRunningEventV1
		}
	case c.IsFalse():
		// False: the run failed.
		switch runObject.(type) {
		case *v1.TaskRun:
			eventType = TaskRunFailedEventV1
		case *v1.PipelineRun:
			eventType = PipelineRunFailedEventV1
		case *v1beta1.TaskRun:
			eventType = TaskRunFailedEventV1
		case *v1beta1.PipelineRun:
			eventType = PipelineRunFailedEventV1
		case *v1beta1.CustomRun:
			eventType = CustomRunFailedEventV1
		}
	case c.IsTrue():
		// True: the run succeeded.
		switch runObject.(type) {
		case *v1beta1.TaskRun:
			eventType = TaskRunSuccessfulEventV1
		case *v1beta1.PipelineRun:
			eventType = PipelineRunSuccessfulEventV1
		case *v1.TaskRun:
			eventType = TaskRunSuccessfulEventV1
		case *v1.PipelineRun:
			eventType = PipelineRunSuccessfulEventV1
		case *v1beta1.CustomRun:
			eventType = CustomRunSuccessfulEventV1
		}
	default:
		return nil, fmt.Errorf("unknown condition for in %T.Status %s", runObject, c.Status)
	}
	return &eventType, nil
}
// GetCloudEventDeliveryCompareOptions returns compare options to sort and
// compare a list of CloudEventDelivery values.
func GetCloudEventDeliveryCompareOptions() []cmp.Option {
	// Two delivery states match when their conditions and retry counts match.
	sameState := func(x, y v1beta1.CloudEventDeliveryState) bool {
		return cmp.Equal(x.Condition, y.Condition) && cmp.Equal(x.RetryCount, y.RetryCount)
	}
	// Order deliveries by target, breaking ties by the SentAt timestamp.
	byTargetThenTime := func(x, y v1beta1.CloudEventDelivery) bool {
		switch strings.Compare(x.Target, y.Target) {
		case -1:
			return true
		case 0:
			return x.Status.SentAt.Before(y.Status.SentAt)
		default:
			return false
		}
	}
	return []cmp.Option{
		cmpopts.SortSlices(byTargetThenTime),
		cmp.Comparer(func(x, y v1beta1.CloudEventDelivery) bool {
			return x.Target == y.Target && sameState(x.Status, y.Status)
		}),
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudevent
import (
"context"
"net/http"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/cloudevents/sdk-go/v2/client"
"github.com/cloudevents/sdk-go/v2/event"
"github.com/cloudevents/sdk-go/v2/protocol"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
// init registers the cloud events client with the default injection
// framework so it is attached to the context at controller startup.
func init() {
	injection.Default.RegisterClient(func(ctx context.Context, _ *rest.Config) context.Context {
		return withCloudEventClient(ctx)
	})
}
// ceKey is used to associate the CloudEventClient inside the context.Context.
// An empty struct is the conventional zero-cost context key type.
type ceKey struct{}
// withCloudEventClient builds a CloudEvents HTTP client and stores it in the
// returned context under ceKey, wrapped in a CloudClient.
func withCloudEventClient(ctx context.Context) context.Context {
	logger := logging.FromContext(ctx)
	// Keep-alive is disabled: when it was enabled, connections were not being
	// reused and kept accumulating even with a capped idle-connection pool - see
	// Bug https://github.com/tektoncd/pipeline/issues/3190.
	// TODO(afrittoli) Re-enable keep alive and ensure connections are reused
	// See feature https://github.com/tektoncd/pipeline/issues/3204
	var oneShotTransport http.RoundTripper = &http.Transport{
		DisableKeepAlives: true,
	}
	httpProtocol, err := cloudevents.NewHTTP(cloudevents.WithRoundTripper(oneShotTransport))
	if err != nil {
		logger.Panicf("Error creating the cloudevents http protocol: %s", err)
	}
	ceClient, err := cloudevents.NewClient(httpProtocol, cloudevents.WithUUIDs(), cloudevents.WithTimeNow())
	if err != nil {
		logger.Panicf("Error creating the cloudevents client: %s", err)
	}
	return context.WithValue(ctx, ceKey{}, CloudClient{client: ceClient})
}
// CloudClient is a wrapper of CloudEvents client and implements addCount and decreaseCount.
// The count methods are no-ops here; they exist so CloudClient and the test
// FakeClient share the same surface.
type CloudClient struct {
	// client is the underlying CloudEvents SDK client all calls delegate to.
	client client.Client
}
// addCount does nothing; the real client does not track in-flight events.
func (c CloudClient) addCount() {
}
// decreaseCount does nothing; the real client does not track in-flight events.
func (c CloudClient) decreaseCount() {
}
// Send invokes call client.Send, delegating directly to the wrapped CloudEvents client.
func (c CloudClient) Send(ctx context.Context, event cloudevents.Event) protocol.Result {
	return c.client.Send(ctx, event)
}
// Request invokes client.Request, delegating directly to the wrapped CloudEvents client.
func (c CloudClient) Request(ctx context.Context, event event.Event) (*cloudevents.Event, protocol.Result) {
	return c.client.Request(ctx, event)
}
// StartReceiver invokes client.StartReceiver, delegating directly to the wrapped CloudEvents client.
func (c CloudClient) StartReceiver(ctx context.Context, fn interface{}) error {
	return c.client.StartReceiver(ctx, fn)
}
// Get extracts the cloudEventClient client from the context.
// It logs an error and returns nil when no client was stored in ctx.
func Get(ctx context.Context) CEClient {
	v := ctx.Value(ceKey{})
	if v == nil {
		logging.FromContext(ctx).Errorf("Unable to fetch client from context.")
		return nil
	}
	return v.(CEClient)
}
// ToContext adds the cloud events client to the context under the same key
// that Get reads from.
func ToContext(ctx context.Context, cec CEClient) context.Context {
	return context.WithValue(ctx, ceKey{}, cec)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudevent
import (
"context"
"fmt"
"regexp"
"sync"
"testing"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/cloudevents/sdk-go/v2/protocol"
)
// FakeClientBehaviour defines how the client will behave.
type FakeClientBehaviour struct {
	// SendSuccessfully makes Send/Request succeed when true and return an
	// error when false.
	SendSuccessfully bool
}
// FakeClient is a fake CloudEvent client for unit testing.
// Holding a pointer to the behaviour allows to change the behaviour of a client.
type FakeClient struct {
	behaviour *FakeClientBehaviour
	// Modelled after k8s.io/client-go fake recorder; its buffer size caps
	// how many events may be sent before Send/Request start failing.
	events chan string
	// waitGroup is used to block until all events have been sent
	waitGroup *sync.WaitGroup
}
// newFakeClient is a FakeClient factory, it returns a client for the target.
// expectedEventCount sizes the event buffer so that sending more events than
// expected is rejected rather than silently queued.
func newFakeClient(behaviour *FakeClientBehaviour, expectedEventCount int) CEClient {
	return FakeClient{
		behaviour: behaviour,
		// set buffersize to length of want events to make sure no extra events are sent
		events:    make(chan string, expectedEventCount),
		waitGroup: &sync.WaitGroup{},
	}
}

// Compile-time check that FakeClient satisfies the cloudevents.Client interface.
var _ cloudevents.Client = (*FakeClient)(nil)
// Send fakes the Send method from cloudevents.Client.
// It records the event string in the buffered channel, failing when the
// behaviour demands failure or when the buffer (sized to the expected event
// count) is already full.
func (c FakeClient) Send(ctx context.Context, event cloudevents.Event) protocol.Result {
	if !c.behaviour.SendSuccessfully {
		return fmt.Errorf("had to fail. Event ID: %s", event.ID())
	}
	// The buffer is sized to the expected event count, so a full channel means
	// an extra event; events are only read out in CheckCloudEventsUnordered.
	if len(c.events) >= cap(c.events) {
		return fmt.Errorf("channel is full of size:%v, but extra event wants to be sent:%v", cap(c.events), event)
	}
	c.events <- event.String()
	return nil
}
// Request fakes the Request method from cloudevents.Client.
// Behaves like Send but also echoes the event back on success.
func (c FakeClient) Request(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, protocol.Result) {
	if !c.behaviour.SendSuccessfully {
		return nil, fmt.Errorf("had to fail. Event ID: %s", event.ID())
	}
	if len(c.events) >= cap(c.events) {
		return nil, fmt.Errorf("channel is full of size:%v, but extra event wants to be sent:%v", cap(c.events), event)
	}
	c.events <- event.String()
	return &event, nil
}
// StartReceiver fakes StartReceiver method from cloudevents.Client; it is a no-op.
func (c FakeClient) StartReceiver(ctx context.Context, fn interface{}) error {
	return nil
}
// addCount can be used to add the count when each event is going to be sent.
// CheckCloudEventsUnordered waits on this group before draining events.
func (c FakeClient) addCount() {
	c.waitGroup.Add(1)
}
// decreaseCount can be used to the decrease the count when each event is sent.
func (c FakeClient) decreaseCount() {
	c.waitGroup.Done()
}
// WithFakeClient adds to the context a fake client with the desired behaviour and expectedEventCount.
// It stores the fake under the same ceKey that the production Get reads.
func WithFakeClient(ctx context.Context, behaviour *FakeClientBehaviour, expectedEventCount int) context.Context {
	return context.WithValue(ctx, ceKey{}, newFakeClient(behaviour, expectedEventCount))
}
// CheckCloudEventsUnordered checks that all events in wantEvents, and no others,
// were received via the given chan, in any order.
// Each want entry is matched as a regular expression against the received event.
// Block until all events have been sent.
// testName is unused; errors are reported through t directly.
func (c *FakeClient) CheckCloudEventsUnordered(t *testing.T, testName string, wantEvents []string) {
	t.Helper()
	c.waitGroup.Wait()
	expected := append([]string{}, wantEvents...)
	channelEvents := len(c.events)
	// extra events are prevented in FakeClient's Send function.
	// fewer events are detected because we collect all events from channel and compare with wantEvents
	for range channelEvents {
		event := <-c.events
		if len(expected) == 0 {
			t.Errorf("extra event received: %q", event)
			// Nothing is left to match against: report the extra event once
			// and move on, instead of also flagging it as "unexpected" below.
			continue
		}
		found := false
		for wantIdx, want := range expected {
			matching, err := regexp.MatchString(want, event)
			if err != nil {
				t.Errorf("something went wrong matching an event: %s", err)
			}
			if matching {
				found = true
				// Remove event from list of those we expect to receive
				expected[wantIdx] = expected[len(expected)-1]
				expected = expected[:len(expected)-1]
				break
			}
		}
		if !found {
			t.Errorf("unexpected event received: %q", event)
		}
	}
	if len(expected) != 0 {
		t.Errorf("%d events %#v are not received", len(expected), expected)
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"context"
"github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"github.com/tektoncd/pipeline/pkg/reconciler/events/k8sevent"
"k8s.io/apimachinery/pkg/runtime"
"knative.dev/pkg/apis"
)
// Emit emits events for object
// Two types of events are supported, k8s and cloud events.
//
// k8s events are always sent if afterCondition is different from beforeCondition
// Cloud events are always sent if enabled, i.e. if a sink is available
func Emit(ctx context.Context, beforeCondition *apis.Condition, afterCondition *apis.Condition, object runtime.Object) {
	k8sevent.EmitK8sEvents(ctx, beforeCondition, afterCondition, object)
	cloudevent.EmitCloudEventsWhenConditionChange(ctx, beforeCondition, afterCondition, object)
}
// EmitCloudEvents is refactored to cloudevent, this is to avoid breaking change.
var EmitCloudEvents = cloudevent.EmitCloudEvents

// EmitError is refactored to k8sevent, this is to avoid breaking change.
var EmitError = k8sevent.EmitError
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sevent
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"knative.dev/pkg/apis"
"knative.dev/pkg/controller"
)
const (
	// EventReasonSucceded is the reason set for events about successful completion of TaskRuns / PipelineRuns.
	// NOTE(review): the identifier is misspelled ("Succeded") but is exported API; renaming would break callers.
	EventReasonSucceded = "Succeeded"
	// EventReasonFailed is the reason set for events about unsuccessful completion of TaskRuns / PipelineRuns
	EventReasonFailed = "Failed"
	// EventReasonStarted is the reason set for events about the start of TaskRuns / PipelineRuns
	EventReasonStarted = "Started"
	// EventReasonError is the reason set for events related to TaskRuns / PipelineRuns reconcile errors
	EventReasonError = "Error"
)
// EmitK8sEvents emits kubernetes events for object
// k8s events are always sent if afterCondition is different from beforeCondition
func EmitK8sEvents(ctx context.Context, beforeCondition *apis.Condition, afterCondition *apis.Condition, object runtime.Object) {
	recorder := controller.GetEventRecorder(ctx)
	// Events that are going to be sent
	//
	// Status "ConditionUnknown":
	//   beforeCondition == nil, emit EventReasonStarted
	//   beforeCondition != nil, emit afterCondition.Reason
	//
	// Status "ConditionTrue": emit EventReasonSucceded
	// Status "ConditionFalse": emit EventReasonFailed
	if afterCondition == nil || equality.Semantic.DeepEqual(beforeCondition, afterCondition) {
		// No condition change (or no target condition): nothing to emit.
		return
	}
	switch afterCondition.Status {
	case corev1.ConditionTrue:
		recorder.Event(object, corev1.EventTypeNormal, EventReasonSucceded, afterCondition.Message)
	case corev1.ConditionFalse:
		recorder.Event(object, corev1.EventTypeWarning, EventReasonFailed, afterCondition.Message)
	case corev1.ConditionUnknown:
		if beforeCondition == nil {
			// First observed condition with "unknown" status: the run has just
			// started. Further "unknown" updates take the branch below.
			recorder.Event(object, corev1.EventTypeNormal, EventReasonStarted, "")
		} else {
			// A later "unknown" update mirrors the condition's own reason and
			// message, e.g. the transition from "started" to "running".
			recorder.Event(object, corev1.EventTypeNormal, afterCondition.Reason, afterCondition.Message)
		}
	}
}
// EmitError emits a failure associated to an error.
// A nil error emits nothing.
func EmitError(c record.EventRecorder, err error, object runtime.Object) {
	if err == nil {
		return
	}
	c.Event(object, corev1.EventTypeWarning, EventReasonError, err.Error())
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sevent
import (
"fmt"
"regexp"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
)
// CheckEventsOrdered checks that the events received via the given chan are the same as wantEvents,
// in the same order. The returned error is annotated with testName.
func CheckEventsOrdered(t *testing.T, eventChan chan string, testName string, wantEvents []string) error {
	t.Helper()
	if err := eventsFromChannel(eventChan, wantEvents); err != nil {
		return fmt.Errorf("error in test %s: %w", testName, err)
	}
	return nil
}
// eventsFromChannel takes a chan of string, a test name, and a list of events that a test
// expects to receive. The events must be received in the same order they appear in the
// wantEvents list. Any extra or too few received events are considered errors.
func eventsFromChannel(c chan string, wantEvents []string) error {
	// We get events from a channel, so the timeout is here to avoid waiting
	// on the channel forever if fewer than expected events are received.
	// We only hit the timeout in case of failure of the test, so the actual value
	// of the timeout is not so relevant, it's only used when tests are going to fail.
	timer := time.After(wait.ForeverTestTimeout)
	foundEvents := []string{}
	for ii := range wantEvents {
		// We loop over all the events that we expect. Once they are all received
		// we exit the loop. If we never receive enough events, the timeout takes us
		// out of the loop.
		select {
		case event := <-c:
			foundEvents = append(foundEvents, event)
			wantEvent := wantEvents[ii]
			// If the event is an exact match, there is no need to use regular expressions for matching.
			// This can avoid the need to escape special characters, such as *, in the event to match.
			if wantEvent == event {
				continue
			}
			matching, err := regexp.MatchString(wantEvent, event)
			if err != nil {
				return fmt.Errorf("something went wrong matching the event: %w", err)
			}
			if !matching {
				return fmt.Errorf("expected event \"%s\" but got \"%s\" instead", wantEvent, event)
			}
		case <-timer:
			return fmt.Errorf("received %d events but %d expected. Found events: %#v", len(foundEvents), len(wantEvents), foundEvents)
		}
	}
	// Check if there is an extra event in the channel, return error if found.
	// A single non-blocking select suffices: every branch returns.
	select {
	case event := <-c:
		return fmt.Errorf("unexpected event: %q", event)
	default:
		return nil
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package customrun
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
customruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun"
customrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/customrun"
cacheclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cache"
cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
)
// NewController instantiates a new controller.Impl from knative.dev/pkg/controller
// This is a read-only controller, hence the SkipStatusUpdates set to true
func NewController() func(context.Context, configmap.Watcher) *controller.Impl {
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		logger := logging.FromContext(ctx)
		customRunInformer := customruninformer.Get(ctx)
		configStore := config.NewStore(logger.Named("config-store"))
		configStore.WatchConfigs(cmw)
		c := &Reconciler{
			cloudEventClient: cloudeventclient.Get(ctx),
			cacheClient:      cacheclient.Get(ctx),
		}
		impl := customrunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {
			return controller.Options{
				AgentName:         pipeline.CustomRunControllerName,
				ConfigStore:       configStore,
				SkipStatusUpdates: true,
			}
		})
		if _, err := customRunInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)); err != nil {
			// %v, not %w: the %w verb is only interpreted by fmt.Errorf, so a
			// Panicf call would render it as %!w(...). Reuse the logger fetched above.
			logger.Panicf("Couldn't register CustomRun informer event handler: %v", err)
		}
		return impl
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package customrun
import (
"context"
lru "github.com/hashicorp/golang-lru"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
customrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/customrun"
"github.com/tektoncd/pipeline/pkg/reconciler/events"
"github.com/tektoncd/pipeline/pkg/reconciler/events/cache"
"github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"knative.dev/pkg/apis"
"knative.dev/pkg/logging"
pkgreconciler "knative.dev/pkg/reconciler"
)
// Reconciler implements controller.Reconciler for Configuration resources.
type Reconciler struct {
	// cloudEventClient sends CloudEvents for reconciled CustomRuns.
	cloudEventClient cloudevent.CEClient
	// cacheClient de-duplicates already-sent events.
	cacheClient *lru.Cache
}

// Check that our Reconciler implements customrunreconciler.Interface
var (
	_ customrunreconciler.Interface = (*Reconciler)(nil)
)
// ReconcileKind compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the CustomRun
// resource with the current status of the resource.
func (c *Reconciler) ReconcileKind(ctx context.Context, customRun *v1beta1.CustomRun) pkgreconciler.Event {
	logger := logging.FromContext(ctx)
	cfg := config.FromContextOrDefaults(ctx)
	// Make the cloud-event client and the event cache reachable downstream.
	ctx = cloudevent.ToContext(ctx, c.cloudEventClient)
	ctx = cache.ToContext(ctx, c.cacheClient)
	logger.Infof("Reconciling %s", customRun.Name)
	// Custom task controllers may be sending events for "CustomRuns" associated
	// to the custom tasks they control. To avoid sending duplicate events,
	// CloudEvents for "CustomRuns" are only sent when enabled
	if !cfg.FeatureFlags.SendCloudEventsForRuns {
		return nil
	}
	// Emit from a deep copy so event handling cannot sync changes back.
	runCopy := *customRun.DeepCopy()
	condition := runCopy.Status.GetCondition(apis.ConditionSucceeded)
	logger.Debugf("Emitting cloudevent for %s, condition: %s", runCopy.Name, condition)
	events.EmitCloudEvents(ctx, &runCopy)
	return nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dag
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/tektoncd/pipeline/pkg/list"
"k8s.io/apimachinery/pkg/util/sets"
)
// Task is an interface for all types that could be in a DAG.
type Task interface {
	// HashKey returns the unique name identifying this task in the graph.
	HashKey() string
	// Deps returns the hash keys of the tasks this task depends on.
	Deps() []string
}
// Tasks is an interface for lists of types that could be in a DAG.
type Tasks interface {
	Items() []Task
}
// Node represents a Task in a pipeline.
type Node struct {
	// Key represent a unique name of the node in a graph
	Key string
	// Prev represent all the Previous task Nodes for the current Task
	Prev []*Node
	// Next represent all the Next task Nodes for the current Task
	Next []*Node
}
// Graph represents the Pipeline Graph.
type Graph struct {
	// Nodes represent map of PipelineTask name to Node in Pipeline Graph
	Nodes map[string]*Node
}
// newGraph returns an empty Pipeline Graph.
func newGraph() *Graph {
	return &Graph{Nodes: map[string]*Node{}}
}
// addPipelineTask registers t as a node in the graph, rejecting duplicate keys.
func (g *Graph) addPipelineTask(t Task) (*Node, error) {
	key := t.HashKey()
	if _, exists := g.Nodes[key]; exists {
		return nil, errors.New("duplicate pipeline task")
	}
	node := &Node{Key: key}
	g.Nodes[key] = node
	return node, nil
}
// Build returns a valid pipeline Graph. Returns error if the pipeline is invalid
func Build(tasks Tasks, deps map[string][]string) (*Graph, error) {
	g := newGraph()
	// Register every task declared in the PipelineSpec as a node.
	for _, pt := range tasks.Items() {
		if _, err := g.addPipelineTask(pt); err != nil {
			return nil, fmt.Errorf("task %s is already present in Graph, can't add it again: %w", pt.HashKey(), err)
		}
	}
	// Reject graphs with dependency cycles up front.
	if err := findCyclesInDependencies(deps); err != nil {
		return nil, fmt.Errorf("cycle detected; %w", err)
	}
	// Wire every from/runAfter constraint as an edge between nodes.
	for task, taskDeps := range deps {
		for _, prior := range taskDeps {
			if err := addLink(task, prior, g.Nodes); err != nil {
				return nil, fmt.Errorf("couldn't add link between %s and %s: %w", task, prior, err)
			}
		}
	}
	return g, nil
}
// GetCandidateTasks returns a set of names of PipelineTasks whose ancestors are all completed,
// given a list of finished doneTasks. If the specified
// doneTasks are invalid (i.e. if it is indicated that a Task is done, but the
// previous Tasks are not done), an error is returned.
func GetCandidateTasks(g *Graph, doneTasks ...string) (sets.String, error) {
	roots := getRoots(g)
	tm := sets.NewString(doneTasks...)
	d := sets.NewString()
	visited := sets.NewString()
	for _, root := range roots {
		// findSchedulable fills visited as a side effect.
		d.Insert(findSchedulable(root, visited, tm)...)
	}
	// visited.List() replaces a manual map-iteration loop; DiffLeft does not
	// depend on the order of its second argument.
	notVisited := list.DiffLeft(doneTasks, visited.List())
	if len(notVisited) > 0 {
		return nil, fmt.Errorf("invalid list of done tasks; some tasks were indicated completed without ancestors being done: %v", notVisited)
	}
	return d, nil
}
// linkPipelineTasks records a prev -> next edge on both endpoints.
func linkPipelineTasks(prev *Node, next *Node) {
	prev.Next = append(prev.Next, next)
	next.Prev = append(next.Prev, prev)
}
// use Kahn's algorithm to find cycles in dependencies
func findCyclesInDependencies(deps map[string][]string) error {
	// independentTasks holds tasks with no (remaining) unresolved dependencies.
	independentTasks := sets.NewString()
	// dag maps each task to the set of dependencies not yet resolved.
	dag := make(map[string]sets.String, len(deps))
	// childMap maps each task to the set of tasks that depend on it.
	childMap := make(map[string]sets.String, len(deps))
	for task, taskDeps := range deps {
		if len(taskDeps) == 0 {
			continue
		}
		dag[task] = sets.NewString(taskDeps...)
		for _, dep := range taskDeps {
			// A dependency that itself has no dependencies is a starting point.
			if len(deps[dep]) == 0 {
				independentTasks.Insert(dep)
			}
			if children, ok := childMap[dep]; ok {
				children.Insert(task)
			} else {
				childMap[dep] = sets.NewString(task)
			}
		}
	}
	// Repeatedly remove independent tasks; whatever remains in dag afterwards
	// participates in a cycle.
	for {
		parent, ok := independentTasks.PopAny()
		if !ok {
			break
		}
		children := childMap[parent]
		for {
			child, ok := children.PopAny()
			if !ok {
				break
			}
			// The child no longer waits on parent; once its dependency set
			// drains it becomes independent itself.
			dag[child].Delete(parent)
			if dag[child].Len() == 0 {
				independentTasks.Insert(child)
				delete(dag, child)
			}
		}
	}
	return getInterdependencyError(dag)
}
// getInterdependencyError reports a cycle by naming the lexicographically
// smallest task left in dag together with its dependencies; nil when dag is empty.
func getInterdependencyError(dag map[string]sets.String) error {
	if len(dag) == 0 {
		return nil
	}
	// Pick the smallest task name so the message is deterministic.
	var firstChild string
	for task := range dag {
		if firstChild == "" || task < firstChild {
			firstChild = task
		}
	}
	deps := dag[firstChild].List()
	sort.Strings(deps)
	quoted := make([]string, 0, len(deps))
	for _, dep := range deps {
		quoted = append(quoted, fmt.Sprintf("%q", dep))
	}
	return fmt.Errorf("task %q depends on %s", firstChild, strings.Join(quoted, ", "))
}
// addLink connects previousTask -> pt in nodes, erroring when the dependency
// is not a task in the Pipeline.
func addLink(pt string, previousTask string, nodes map[string]*Node) error {
	prev, found := nodes[previousTask]
	if !found {
		return fmt.Errorf("task %s depends on %s but %s wasn't present in Pipeline", pt, previousTask, previousTask)
	}
	linkPipelineTasks(prev, nodes[pt])
	return nil
}
// getRoots returns every node in g that has no predecessors.
func getRoots(g *Graph) []*Node {
	roots := []*Node{}
	for _, node := range g.Nodes {
		if len(node.Prev) == 0 {
			roots = append(roots, node)
		}
	}
	return roots
}
// findSchedulable walks the graph from n and returns the names of tasks that
// can be scheduled next: not-yet-done tasks all of whose predecessors are in
// doneTasks. visited is updated in place to avoid revisiting shared nodes.
func findSchedulable(n *Node, visited sets.String, doneTasks sets.String) []string {
	if visited.Has(n.Key) {
		return []string{}
	}
	visited.Insert(n.Key)
	if doneTasks.Has(n.Key) {
		schedulable := []string{}
		// This one is done! Take note of it and look at the next candidate
		for _, next := range n.Next {
			// Use Has for consistency with the rest of this file instead of
			// indexing the underlying map directly.
			if !visited.Has(next.Key) {
				schedulable = append(schedulable, findSchedulable(next, visited, doneTasks)...)
			}
		}
		return schedulable
	}
	// This one isn't done! Return it if it's schedulable
	if isSchedulable(doneTasks, n.Prev) {
		// FIXME(vdemeester)
		return []string{n.Key}
	}
	// This one isn't done, but it also isn't ready to schedule
	return []string{}
}
// isSchedulable reports whether every predecessor in prevs is in doneTasks.
// A node with no predecessors is always schedulable.
func isSchedulable(doneTasks sets.String, prevs []*Node) bool {
	// Early-return on the first unfinished predecessor instead of collecting
	// a slice just to compare lengths; behavior is unchanged.
	for _, n := range prevs {
		if !doneTasks.Has(n.Key) {
			return false
		}
	}
	return true
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerun
import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
aa "github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
pipelinePod "github.com/tektoncd/pipeline/pkg/pod"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
"github.com/tektoncd/pipeline/pkg/workspace"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/kmeta"
"knative.dev/pkg/logging"
)
const (
	// ReasonCouldntCreateOrUpdateAffinityAssistantStatefulSet indicates that a PipelineRun uses workspaces with PersistentVolumeClaim
	// as a volume source and expect an Assistant StatefulSet in AffinityAssistantPerWorkspace behavior, but couldn't create a StatefulSet.
	ReasonCouldntCreateOrUpdateAffinityAssistantStatefulSet = "ReasonCouldntCreateOrUpdateAffinityAssistantStatefulSet"
)
var (
	// ErrPvcCreationFailed is kept as an alias for backward compatibility.
	// Deprecated: use volumeclaim.ErrPvcCreationFailed instead
	ErrPvcCreationFailed = volumeclaim.ErrPvcCreationFailed
	// ErrPvcCreationFailedRetryable is kept as an alias for backward compatibility.
	// Deprecated: use volumeclaim.ErrPvcCreationFailedRetryable instead
	ErrPvcCreationFailedRetryable = volumeclaim.ErrPvcCreationFailedRetryable
	// ErrAffinityAssistantCreationFailed wraps failures to create or update an
	// Affinity Assistant StatefulSet.
	ErrAffinityAssistantCreationFailed = errors.New("Affinity Assistant creation error")
)
// createOrUpdateAffinityAssistantsAndPVCs creates Affinity Assistant StatefulSets and PVCs based on AffinityAssistantBehavior.
// This is done to achieve Node Affinity for taskruns in a pipelinerun, and make it possible for the taskruns to execute parallel while sharing volume.
// If the AffinityAssistantBehavior is AffinityAssistantPerWorkspace, it creates an Affinity Assistant for
// every taskrun in the pipelinerun that use the same PVC based volume.
// If the AffinityAssistantBehavior is AffinityAssistantPerPipelineRun or AffinityAssistantPerPipelineRunWithIsolation,
// it creates one Affinity Assistant for the pipelinerun.
func (c *Reconciler) createOrUpdateAffinityAssistantsAndPVCs(ctx context.Context, pr *v1.PipelineRun, aaBehavior aa.AffinityAssistantBehavior) error {
	// unschedulableNodes is always nil at this entry point; the callee accepts
	// a nil set as "no known cordoned nodes".
	var unschedulableNodes sets.Set[string] = nil
	var claimTemplates []corev1.PersistentVolumeClaim
	var claimNames []string
	claimNameToWorkspaceName := map[string]string{}
	claimTemplateToWorkspace := map[*corev1.PersistentVolumeClaim]v1.WorkspaceBinding{}
	// Partition PVC-backed workspaces into pre-existing claims and claim templates.
	for _, w := range pr.Spec.Workspaces {
		if w.PersistentVolumeClaim == nil && w.VolumeClaimTemplate == nil {
			continue
		}
		if w.PersistentVolumeClaim != nil {
			claim := w.PersistentVolumeClaim
			claimNames = append(claimNames, claim.ClaimName)
			claimNameToWorkspaceName[claim.ClaimName] = w.Name
		} else if w.VolumeClaimTemplate != nil {
			// Deep-copy before renaming so the PipelineRun spec is not mutated.
			claimTemplate := w.VolumeClaimTemplate.DeepCopy()
			claimTemplate.Name = volumeclaim.GeneratePVCNameFromWorkspaceBinding(w.VolumeClaimTemplate.Name, w, *kmeta.NewControllerRef(pr))
			claimTemplates = append(claimTemplates, *claimTemplate)
			claimTemplateToWorkspace[claimTemplate] = w
		}
	}
	switch aaBehavior {
	case aa.AffinityAssistantPerWorkspace:
		// One Affinity Assistant per PVC-backed workspace.
		for claimName, workspaceName := range claimNameToWorkspaceName {
			aaName := GetAffinityAssistantName(workspaceName, pr.Name)
			if err := c.createOrUpdateAffinityAssistant(ctx, aaName, pr, nil, []string{claimName}, unschedulableNodes); err != nil {
				return fmt.Errorf("%w: %v", ErrAffinityAssistantCreationFailed, err)
			}
		}
		for claimTemplate, workspace := range claimTemplateToWorkspace {
			// To support PVC auto deletion at pipelinerun deletion time, the OwnerReference of the PVCs should be set to the owning pipelinerun instead of the StatefulSets,
			// so we create PVCs from PipelineRuns' VolumeClaimTemplate and pass the PVCs to the Affinity Assistant StatefulSet for volume scheduling.
			if err := c.pvcHandler.CreatePVCFromVolumeClaimTemplate(ctx, workspace, *kmeta.NewControllerRef(pr), pr.Namespace); err != nil {
				return err
			}
			aaName := GetAffinityAssistantName(workspace.Name, pr.Name)
			if err := c.createOrUpdateAffinityAssistant(ctx, aaName, pr, nil, []string{claimTemplate.Name}, unschedulableNodes); err != nil {
				return fmt.Errorf("%w: %v", ErrAffinityAssistantCreationFailed, err)
			}
		}
	case aa.AffinityAssistantPerPipelineRun, aa.AffinityAssistantPerPipelineRunWithIsolation:
		aaName := GetAffinityAssistantName("", pr.Name)
		// The PVCs are created via StatefulSet's VolumeClaimTemplate for volume scheduling
		// in AffinityAssistantPerPipelineRun or AffinityAssistantPerPipelineRunWithIsolation modes.
		// This is because PVCs from pipelinerun's VolumeClaimTemplate are enforced to be deleted at pipelinerun completion time in these modes,
		// and there is no requirement of the PVC OwnerReference.
		if err := c.createOrUpdateAffinityAssistant(ctx, aaName, pr, claimTemplates, claimNames, unschedulableNodes); err != nil {
			return fmt.Errorf("%w: %v", ErrAffinityAssistantCreationFailed, err)
		}
	case aa.AffinityAssistantDisabled:
		// No Affinity Assistant: still create the PVCs from claim templates so
		// the workspaces are usable.
		for _, workspace := range claimTemplateToWorkspace {
			if err := c.pvcHandler.CreatePVCFromVolumeClaimTemplate(ctx, workspace, *kmeta.NewControllerRef(pr), pr.Namespace); err != nil {
				return err
			}
		}
	}
	return nil
}
// createOrUpdateAffinityAssistant creates an Affinity Assistant StatefulSet with the provided affinityAssistantName and pipelinerun information.
// The VolumeClaimTemplates and Volumes of StatefulSet reference the resolved claimTemplates and claims respectively.
// It maintains a set of unschedulableNodes to detect and recreate Affinity Assistant in case of the node is cordoned to avoid pipelinerun deadlock.
//
// Errors are accumulated into the returned slice rather than aborting early; a nil/empty
// result means the Affinity Assistant is in (or converging towards) the desired state.
func (c *Reconciler) createOrUpdateAffinityAssistant(ctx context.Context, affinityAssistantName string, pr *v1.PipelineRun, claimTemplates []corev1.PersistentVolumeClaim, claimNames []string, unschedulableNodes sets.Set[string]) []error {
	logger := logging.FromContext(ctx)
	cfg := config.FromContextOrDefaults(ctx)
	var errs []error
	a, err := c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Get(ctx, affinityAssistantName, metav1.GetOptions{})
	switch {
	// check whether the affinity assistant (StatefulSet) exists or not, create one if it does not exist
	case apierrors.IsNotFound(err):
		aaBehavior, err := aa.GetAffinityAssistantBehavior(ctx)
		if err != nil {
			// without a valid behavior the StatefulSet cannot be built at all, so fail immediately
			return []error{err}
		}
		securityContextConfig := pipelinePod.SecurityContextConfig{
			SetSecurityContext:        cfg.FeatureFlags.SetSecurityContext,
			SetReadOnlyRootFilesystem: cfg.FeatureFlags.SetSecurityContextReadOnlyRootFilesystem,
		}
		containerConfig := aa.ContainerConfig{
			Image:                 c.Images.NopImage,
			SecurityContextConfig: securityContextConfig,
		}
		affinityAssistantStatefulSet := affinityAssistantStatefulSet(aaBehavior, affinityAssistantName, pr, claimTemplates, claimNames, containerConfig, cfg.Defaults.DefaultAAPodTemplate)
		_, err = c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Create(ctx, affinityAssistantStatefulSet, metav1.CreateOptions{})
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to create StatefulSet %s: %w", affinityAssistantName, err))
		}
		if err == nil {
			logger.Infof("Created StatefulSet %s in namespace %s", affinityAssistantName, pr.Namespace)
		}
	// check whether the affinity assistant (StatefulSet) exists and the affinity assistant pod is created
	// this check requires the StatefulSet to have the readyReplicas set to 1 to allow for any delay between the StatefulSet creation
	// and the necessary pod creation, the delay can be caused by any dependency on PVCs and PVs creation
	// this case addresses issues specified in https://github.com/tektoncd/pipeline/issues/6586
	case err == nil && a != nil && a.Status.ReadyReplicas == 1:
		if unschedulableNodes == nil {
			// the caller did not precompute the cordoned-node set, so list cordoned nodes from the API server
			ns, err := c.KubeClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{
				FieldSelector: "spec.unschedulable=true",
			})
			if err != nil {
				errs = append(errs, fmt.Errorf("could not get the list of nodes, err: %w", err))
			}
			unschedulableNodes = sets.Set[string]{}
			// maintain the list of nodes which are unschedulable
			for _, n := range ns.Items {
				unschedulableNodes.Insert(n.Name)
			}
		}
		if unschedulableNodes.Len() > 0 {
			// get the pod created for a given StatefulSet, pod is assigned ordinal of 0 with the replicas set to 1
			p, err := c.KubeClientSet.CoreV1().Pods(pr.Namespace).Get(ctx, a.Name+"-0", metav1.GetOptions{})
			// ignore instead of failing if the affinity assistant pod was not found
			if err != nil && !apierrors.IsNotFound(err) {
				errs = append(errs, fmt.Errorf("could not get the affinity assistant pod for StatefulSet %s: %w", a.Name, err))
			}
			// check the node which hosts the affinity assistant pod if it is unschedulable or cordoned
			if p != nil && unschedulableNodes.Has(p.Spec.NodeName) {
				// if the node is unschedulable, delete the affinity assistant pod such that a StatefulSet can recreate the same pod on a different node
				err = c.KubeClientSet.CoreV1().Pods(p.Namespace).Delete(ctx, p.Name, metav1.DeleteOptions{})
				if err != nil {
					errs = append(errs, fmt.Errorf("error deleting affinity assistant pod %s in ns %s: %w", p.Name, p.Namespace, err))
				}
			}
		}
	// any other Get error: record it and let the reconciler retry on the next sync
	case err != nil:
		errs = append(errs, fmt.Errorf("failed to retrieve StatefulSet %s: %w", affinityAssistantName, err))
	}
	return errs
}
// cleanupAffinityAssistantsAndPVCs deletes the Affinity Assistant StatefulSets belonging to the given
// PipelineRun and, in the per-pipelinerun modes, also the PVCs that were created from its
// VolumeClaimTemplates. Individual deletion failures are aggregated into a single returned error.
func (c *Reconciler) cleanupAffinityAssistantsAndPVCs(ctx context.Context, pr *v1.PipelineRun) error {
	aaBehavior, err := aa.GetAffinityAssistantBehavior(ctx)
	if err != nil {
		return err
	}

	var errs []error
	// deleteStatefulSet removes one Affinity Assistant StatefulSet, treating "not found" as success.
	deleteStatefulSet := func(name string) {
		if err := c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
			errs = append(errs, fmt.Errorf("failed to delete StatefulSet %s: %w", name, err))
		}
	}

	switch aaBehavior {
	case aa.AffinityAssistantPerWorkspace:
		// TODO (#5776): support optional PVC deletion behavior for per-workspace mode
		for _, w := range pr.Spec.Workspaces {
			if w.PersistentVolumeClaim == nil && w.VolumeClaimTemplate == nil {
				continue
			}
			deleteStatefulSet(GetAffinityAssistantName(w.Name, pr.Name))
		}
	case aa.AffinityAssistantPerPipelineRun, aa.AffinityAssistantPerPipelineRunWithIsolation:
		// a single assistant serves the whole pipelinerun in these modes
		deleteStatefulSet(GetAffinityAssistantName("", pr.Name))
		// cleanup PVCs created by Affinity Assistants
		for _, w := range pr.Spec.Workspaces {
			if w.VolumeClaimTemplate == nil {
				continue
			}
			pvcName := getPersistentVolumeClaimNameWithAffinityAssistant("", pr.Name, w, *kmeta.NewControllerRef(pr))
			if err := c.pvcHandler.PurgeFinalizerAndDeletePVCForWorkspace(ctx, pvcName, pr.Namespace); err != nil {
				errs = append(errs, err)
			}
		}
	case aa.AffinityAssistantDisabled:
		return nil
	}
	return errorutils.NewAggregate(errs)
}
// getPersistentVolumeClaimNameWithAffinityAssistant returns the name of the PersistentVolumeClaim
// that the Affinity Assistant StatefulSet VolumeClaimTemplate creates when the Affinity Assistant is
// enabled. StatefulSet-created PVCs follow the format `<pvcName>-<affinityAssistantName>-0`.
func getPersistentVolumeClaimNameWithAffinityAssistant(pipelineWorkspaceName, prName string, wb v1.WorkspaceBinding, owner metav1.OwnerReference) string {
	claim := volumeclaim.GeneratePVCNameFromWorkspaceBinding(wb.VolumeClaimTemplate.Name, wb, owner)
	assistant := GetAffinityAssistantName(pipelineWorkspaceName, prName)
	// ordinal suffix "-0" matches the single replica of the StatefulSet
	return claim + "-" + assistant + "-0"
}
// getAffinityAssistantAnnotationVal generates and returns the value for the
// `pipeline.tekton.dev/affinity-assistant` annotation based on aaBehavior, pipelinePVCWorkspaceName
// and prName. An empty return value means no annotation applies.
func getAffinityAssistantAnnotationVal(aaBehavior affinityassistant.AffinityAssistantBehavior, pipelinePVCWorkspaceName string, prName string) string {
	switch aaBehavior {
	case affinityassistant.AffinityAssistantPerWorkspace:
		// one assistant per PVC workspace; no annotation when there is no PVC workspace
		if pipelinePVCWorkspaceName == "" {
			return ""
		}
		return GetAffinityAssistantName(pipelinePVCWorkspaceName, prName)
	case affinityassistant.AffinityAssistantPerPipelineRun, affinityassistant.AffinityAssistantPerPipelineRunWithIsolation:
		// one assistant per pipelinerun, so the workspace name does not participate
		return GetAffinityAssistantName("", prName)
	}
	// AffinityAssistantDisabled (and any unknown behavior) yields no annotation
	return ""
}
// GetAffinityAssistantName returns the Affinity Assistant name based on pipelineWorkspaceName and pipelineRunName
func GetAffinityAssistantName(pipelineWorkspaceName string, pipelineRunName string) string {
hashBytes := sha256.Sum256([]byte(pipelineWorkspaceName + pipelineRunName))
hashString := hex.EncodeToString(hashBytes[:])
return fmt.Sprintf("%s-%s", workspace.ComponentNameAffinityAssistant, hashString[:10])
}
// getStatefulSetLabels returns the labels for the Affinity Assistant StatefulSet: every label of the
// PipelineRun, plus the Tekton-specific identification keys.
func getStatefulSetLabels(pr *v1.PipelineRun, affinityAssistantName string) map[string]string {
	labels := make(map[string]string, len(pr.ObjectMeta.Labels)+1)
	// Propagate labels from PipelineRun to StatefulSet.
	for k, v := range pr.ObjectMeta.Labels {
		labels[k] = v
	}
	labels[pipeline.PipelineRunLabelKey] = pr.Name
	// LabelInstance is used to configure PodAffinity for all TaskRuns belonging to this Affinity Assistant;
	// LabelComponent is used to configure PodAntiAffinity to other Affinity Assistants.
	labels[workspace.LabelInstance] = affinityAssistantName
	labels[workspace.LabelComponent] = workspace.ComponentNameAffinityAssistant
	return labels
}
// affinityAssistantStatefulSet returns an Affinity Assistant as a StatefulSet based on the AffinityAssistantBehavior
// with the given AffinityAssistantTemplate applied to the StatefulSet PodTemplateSpec.
// The VolumeClaimTemplates and Volumes of StatefulSet reference the PipelineRun WorkspaceBinding VolumeClaimTemplate and the PVCs respectively.
// The PVs created by the StatefulSet are scheduled to the same availability zone which avoids PV scheduling conflict.
func affinityAssistantStatefulSet(aaBehavior aa.AffinityAssistantBehavior, name string, pr *v1.PipelineRun, claimTemplates []corev1.PersistentVolumeClaim, claimNames []string, containerConfig aa.ContainerConfig, defaultAATpl *pod.AffinityAssistantTemplate) *appsv1.StatefulSet {
	// We want a singleton pod
	replicas := int32(1)

	tpl := &pod.AffinityAssistantTemplate{}
	// merge pod template from spec and default if any of them are defined
	// NOTE(review): when PodTemplate is nil but defaultAATpl is set, ToAffinityAssistantTemplate is
	// invoked on a nil receiver — presumably it tolerates nil; confirm in its implementation.
	if pr.Spec.TaskRunTemplate.PodTemplate != nil || defaultAATpl != nil {
		tpl = pod.MergeAAPodTemplateWithDefault(pr.Spec.TaskRunTemplate.PodTemplate.ToAffinityAssistantTemplate(), defaultAATpl)
	}

	// each claim template gets mounted under a path named after the claim itself
	var mounts []corev1.VolumeMount
	for _, claimTemplate := range claimTemplates {
		mounts = append(mounts, corev1.VolumeMount{Name: claimTemplate.Name, MountPath: claimTemplate.Name})
	}

	securityContext := &corev1.SecurityContext{}
	if containerConfig.SecurityContextConfig.SetSecurityContext {
		// windows pods need a different security context than linux ones
		isWindows := tpl.NodeSelector[pipelinePod.OsSelectorLabel] == "windows"
		securityContext = containerConfig.SecurityContextConfig.GetSecurityContext(isWindows)
	}

	var priorityClassName string
	if tpl.PriorityClassName != nil {
		priorityClassName = *tpl.PriorityClassName
	}

	containers := []corev1.Container{{
		Name:  "affinity-assistant",
		Image: containerConfig.Image,
		Args:  []string{"tekton_run_indefinitely"},

		// Set requests == limits to get QoS class _Guaranteed_.
		// See https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
		// Affinity Assistant pod is a placeholder; request minimal resources
		Resources: corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				"cpu":    resource.MustParse("50m"),
				"memory": resource.MustParse("100Mi"),
			},
			Requests: corev1.ResourceList{
				"cpu":    resource.MustParse("50m"),
				"memory": resource.MustParse("100Mi"),
			},
		},
		VolumeMounts:    mounts,
		SecurityContext: securityContext,
	}}

	var volumes []corev1.Volume
	for i, claimName := range claimNames {
		volumes = append(volumes, corev1.Volume{
			Name: fmt.Sprintf("workspace-%d", i),
			VolumeSource: corev1.VolumeSource{
				// A Pod mounting a PersistentVolumeClaim that has a StorageClass with
				// volumeBindingMode: Immediate
				// the PV is allocated on a Node first, and then the pod need to be
				// scheduled to that node.
				// To support those PVCs, the Affinity Assistant must also mount the
				// same PersistentVolumeClaim - to be sure that the Affinity Assistant
				// pod is scheduled to the same Availability Zone as the PV, when using
				// a regional cluster. This is called VolumeScheduling.
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: claimName},
			},
		})
	}

	return &appsv1.StatefulSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "StatefulSet",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: getStatefulSetLabels(pr, name),
			// owned by the PipelineRun, so the StatefulSet is garbage-collected along with it
			OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pr)},
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: getStatefulSetLabels(pr, name),
			},
			// by setting VolumeClaimTemplates from StatefulSet, all the PVs are scheduled to the same Availability Zone as the StatefulSet
			VolumeClaimTemplates: claimTemplates,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: getStatefulSetLabels(pr, name),
				},
				Spec: corev1.PodSpec{
					Containers: containers,

					Tolerations:       tpl.Tolerations,
					NodeSelector:      tpl.NodeSelector,
					ImagePullSecrets:  tpl.ImagePullSecrets,
					SecurityContext:   tpl.SecurityContext,
					PriorityClassName: priorityClassName,
					Affinity:          getAssistantAffinityMergedWithPodTemplateAffinity(pr, aaBehavior),
					Volumes:           volumes,
				},
			},
		},
	}
}
// getAssistantAffinityMergedWithPodTemplateAffinity returns the Affinity Assistant's affinity,
// merged with any affinity declared on the PipelineRun's PodTemplate. Depending on aaBehavior the
// anti-affinity towards other Affinity Assistants is either required (isolation mode) or preferred.
func getAssistantAffinityMergedWithPodTemplateAffinity(pr *v1.PipelineRun, aaBehavior aa.AffinityAssistantBehavior) *corev1.Affinity {
	affinityAssistantsAffinity := &corev1.Affinity{}
	if pr.Spec.TaskRunTemplate.PodTemplate != nil && pr.Spec.TaskRunTemplate.PodTemplate.Affinity != nil {
		// DeepCopy so that the anti-affinity terms appended below do not mutate the PipelineRun's
		// PodTemplate in place — pr may originate from the shared informer cache, whose objects
		// must be treated as read-only.
		affinityAssistantsAffinity = pr.Spec.TaskRunTemplate.PodTemplate.Affinity.DeepCopy()
	}
	if affinityAssistantsAffinity.PodAntiAffinity == nil {
		affinityAssistantsAffinity.PodAntiAffinity = &corev1.PodAntiAffinity{}
	}

	// this term repels the pods of other Affinity Assistants (matched via LabelComponent) on the same node
	repelOtherAffinityAssistantsPodAffinityTerm := corev1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{
				workspace.LabelComponent: workspace.ComponentNameAffinityAssistant,
			},
		},
		TopologyKey: "kubernetes.io/hostname",
	}

	if aaBehavior == aa.AffinityAssistantPerPipelineRunWithIsolation {
		// use RequiredDuringSchedulingIgnoredDuringExecution term to enforce only one pipelinerun can run in a node at a time
		affinityAssistantsAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(affinityAssistantsAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
			repelOtherAffinityAssistantsPodAffinityTerm)
	} else {
		preferredRepelOtherAffinityAssistantsPodAffinityTerm := corev1.WeightedPodAffinityTerm{
			Weight:          100,
			PodAffinityTerm: repelOtherAffinityAssistantsPodAffinityTerm,
		}
		// use a PreferredDuringSchedulingIgnoredDuringExecution term to spread pipelineruns across
		// different nodes when possible (the original comment wrongly said "Required" here)
		affinityAssistantsAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(affinityAssistantsAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
			preferredRepelOtherAffinityAssistantsPodAffinityTerm)
	}

	return affinityAssistantsAffinity
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerun
import (
"context"
"encoding/json"
"fmt"
"log"
"strings"
"time"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
"go.uber.org/zap"
jsonpatch "gomodules.xyz/jsonpatch/v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
)
// cancelTaskRunPatchBytes and cancelCustomRunPatchBytes hold the pre-marshalled JSON patches used
// to mark TaskRuns and CustomRuns as cancelled; they are computed once at startup.
var cancelTaskRunPatchBytes, cancelCustomRunPatchBytes []byte

func init() {
	// patch that flips a TaskRun's spec to the cancelled state with the standard message
	taskRunCancelOps := []jsonpatch.JsonPatchOperation{
		{Operation: "add", Path: "/spec/status", Value: v1.TaskRunSpecStatusCancelled},
		{Operation: "add", Path: "/spec/statusMessage", Value: v1.TaskRunCancelledByPipelineMsg},
	}
	patch, err := json.Marshal(taskRunCancelOps)
	if err != nil {
		log.Fatalf("failed to marshal TaskRun cancel patch bytes: %v", err)
	}
	cancelTaskRunPatchBytes = patch

	// patch that flips a CustomRun's spec to the cancelled state with the standard message
	customRunCancelOps := []jsonpatch.JsonPatchOperation{
		{Operation: "add", Path: "/spec/status", Value: v1beta1.CustomRunSpecStatusCancelled},
		{Operation: "add", Path: "/spec/statusMessage", Value: v1beta1.CustomRunCancelledByPipelineMsg},
	}
	patch, err = json.Marshal(customRunCancelOps)
	if err != nil {
		log.Fatalf("failed to marshal CustomRun cancel patch bytes: %v", err)
	}
	cancelCustomRunPatchBytes = patch
}
// cancelCustomRun patches the named CustomRun in the given namespace to the cancelled state.
// A CustomRun that no longer exists is not an error: the PipelineRun cancellation can proceed.
func cancelCustomRun(ctx context.Context, runName string, namespace string, clientSet clientset.Interface) error {
	_, err := clientSet.TektonV1beta1().CustomRuns(namespace).Patch(ctx, runName, types.JSONPatchType, cancelCustomRunPatchBytes, metav1.PatchOptions{}, "")
	switch {
	case errors.IsNotFound(err):
		// the resource may have been deleted in the meanwhile, but we should
		// still be able to cancel the PipelineRun
		return nil
	default:
		return err
	}
}
// cancelTaskRun patches the named TaskRun in the given namespace to the cancelled state.
// Two error cases are deliberately swallowed: the TaskRun no longer existing, and the TaskRun
// having already completed (its spec is then immutable) — in both, cancellation can proceed.
func cancelTaskRun(ctx context.Context, taskRunName string, namespace string, clientSet clientset.Interface) error {
	_, err := clientSet.TektonV1().TaskRuns(namespace).Patch(ctx, taskRunName, types.JSONPatchType, cancelTaskRunPatchBytes, metav1.PatchOptions{}, "")
	switch {
	case errors.IsNotFound(err):
		// the resource may have been deleted in the meanwhile, but we should
		// still be able to cancel the PipelineRun
		return nil
	case pipelineErrors.IsImmutableTaskRunSpecError(err):
		// the TaskRun may have completed and the spec field is immutable; ignore this error
		return nil
	default:
		return err
	}
}
// cancelPipelineRun marks the PipelineRun as cancelled and any resolved TaskRun(s) too.
// When any child cancellation fails, the PipelineRun is marked as "couldn't cancel" instead and
// the joined child errors are returned.
func cancelPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, clientSet clientset.Interface) error {
	errs := cancelPipelineTaskRuns(ctx, logger, pr, clientSet)
	if len(errs) > 0 {
		joined := strings.Join(errs, "\n")
		// indicate that we failed to cancel the PipelineRun
		pr.Status.SetCondition(&apis.Condition{
			Type:    apis.ConditionSucceeded,
			Status:  corev1.ConditionUnknown,
			Reason:  v1.PipelineRunReasonCouldntCancel.String(),
			Message: fmt.Sprintf("PipelineRun %q was cancelled but had errors trying to cancel TaskRuns and/or Runs: %s", pr.Name, joined),
		})
		return fmt.Errorf("error(s) from cancelling TaskRun(s) from PipelineRun %s: %s", pr.Name, joined)
	}

	// all TaskRuns and Runs were cancelled successfully, so the PipelineRun is cancelled
	pr.Status.SetCondition(&apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionFalse,
		Reason:  v1.PipelineRunReasonCancelled.String(),
		Message: fmt.Sprintf("PipelineRun %q was cancelled", pr.Name),
	})
	// record when the PipelineRun finished
	pr.Status.CompletionTime = &metav1.Time{Time: time.Now()}
	return nil
}
// cancelPipelineTaskRuns patches all of the PipelineRun's child `TaskRun`s and `Run`s with a
// cancelled status, returning the error messages of any patches that failed.
func cancelPipelineTaskRuns(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, clientSet clientset.Interface) []string {
	// an empty task-name set means "cancel every child"
	return cancelPipelineTaskRunsForTaskNames(ctx, logger, pr, clientSet, sets.NewString())
}
// cancelPipelineTaskRunsForTaskNames patches the `TaskRun`s and `Run`s for the given task names
// (all of them when taskNames is empty) with a cancelled status, returning the error messages of
// any patches that failed. It keeps going after individual failures so every child is attempted.
func cancelPipelineTaskRunsForTaskNames(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, clientSet clientset.Interface, taskNames sets.String) []string {
	// non-nil so callers always receive a slice, even when everything succeeds
	errs := []string{}

	trNames, customRunNames, err := getChildObjectsFromPRStatusForTaskNames(ctx, pr.Status, taskNames)
	if err != nil {
		errs = append(errs, err.Error())
	}

	for _, name := range trNames {
		logger.Infof("cancelling TaskRun %s", name)
		if err := cancelTaskRun(ctx, name, pr.Namespace, clientSet); err != nil {
			errs = append(errs, fmt.Errorf("failed to patch TaskRun `%s` with cancellation: %w", name, err).Error())
		}
	}

	for _, name := range customRunNames {
		logger.Infof("cancelling CustomRun %s", name)
		if err := cancelCustomRun(ctx, name, pr.Namespace, clientSet); err != nil {
			errs = append(errs, fmt.Errorf("failed to patch CustomRun `%s` with cancellation: %w", name, err).Error())
		}
	}

	return errs
}
// getChildObjectsFromPRStatusForTaskNames returns the taskrun and customrun names recorded in the
// PipelineRunStatus's ChildReferences, filtered to the given set of PipelineTask names (an empty
// set matches everything). Child references of any other kind are reported via the returned error.
func getChildObjectsFromPRStatusForTaskNames(ctx context.Context, prs v1.PipelineRunStatus, taskNames sets.String) ([]string, []string, error) {
	var (
		trNames        []string
		customRunNames []string
	)
	unknownChildKinds := make(map[string]string)

	for _, cr := range prs.ChildReferences {
		// skip children whose PipelineTask is not in the requested (non-empty) set
		if taskNames.Len() > 0 && !taskNames.Has(cr.PipelineTaskName) {
			continue
		}
		switch cr.Kind {
		case taskRun:
			trNames = append(trNames, cr.Name)
		case customRun:
			customRunNames = append(customRunNames, cr.Name)
		default:
			unknownChildKinds[cr.Name] = cr.Kind
		}
	}

	var err error
	if len(unknownChildKinds) > 0 {
		err = fmt.Errorf("found child objects of unknown kinds: %v", unknownChildKinds)
	}
	return trNames, customRunNames, err
}
// gracefullyCancelPipelineRun marks any non-final resolved TaskRun(s) as cancelled and runs finally.
// Unlike cancelPipelineRun it does not set a terminal condition on success: reconciliation continues
// so the finally tasks can run.
func gracefullyCancelPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, clientSet clientset.Interface) error {
	errs := cancelPipelineTaskRuns(ctx, logger, pr, clientSet)
	if len(errs) == 0 {
		// all TaskRuns and Runs were cancelled; proceed with reconciliation to trigger finally
		return nil
	}

	joined := strings.Join(errs, "\n")
	// indicate that we failed to cancel the PipelineRun
	pr.Status.SetCondition(&apis.Condition{
		Type:    apis.ConditionSucceeded,
		Status:  corev1.ConditionUnknown,
		Reason:  v1.PipelineRunReasonCouldntCancel.String(),
		Message: fmt.Sprintf("PipelineRun %q was cancelled but had errors trying to cancel TaskRuns and/or Runs: %s", pr.Name, joined),
	})
	return fmt.Errorf("error(s) from cancelling TaskRun(s) from PipelineRun %s: %s", pr.Name, joined)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerun
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client"
pipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun"
taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun"
verificationpolicyinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy"
customruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun"
pipelinerunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun"
resolutionclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
resolutioninformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
"github.com/tektoncd/pipeline/pkg/pipelinerunmetrics"
cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/tracing"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
kubeclient "knative.dev/pkg/client/injection/kube/client"
secretinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/secret"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
)
const (
	// TracerProviderName is the name of the TracerProvider used by the PipelineRun reconciler.
	TracerProviderName = "pipelinerun-reconciler"
)
// pipelineRunFilterManagedBy reports whether this controller should handle the given object.
// Non-PipelineRun objects pass through unchanged; a PipelineRun is kept only when its ManagedBy
// field is unset or names this controller.
var pipelineRunFilterManagedBy = func(obj interface{}) bool {
	pr, isPipelineRun := obj.(*v1.PipelineRun)
	if !isPipelineRun {
		return true
	}
	managedBy := pr.Spec.ManagedBy
	return managedBy == nil || *managedBy == pipeline.ManagedBy
}
// NewController instantiates a new controller.Impl from knative.dev/pkg/controller.
// It wires up the Reconciler with every injected client, informer, and recorder the sync loop
// needs, and registers event handlers that requeue PipelineRuns when relevant objects change.
func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(context.Context, configmap.Watcher) *controller.Impl {
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		logger := logging.FromContext(ctx)
		// clients and informers are supplied via knative injection from ctx
		kubeclientset := kubeclient.Get(ctx)
		pipelineclientset := pipelineclient.Get(ctx)
		taskRunInformer := taskruninformer.Get(ctx)
		customRunInformer := customruninformer.Get(ctx)
		pipelineRunInformer := pipelineruninformer.Get(ctx)
		resolutionInformer := resolutioninformer.Get(ctx)
		verificationpolicyInformer := verificationpolicyinformer.Get(ctx)
		secretinformer := secretinformer.Get(ctx)
		tracerProvider := tracing.New(TracerProviderName, logger.Named("tracing"))
		pipelinerunmetricsRecorder := pipelinerunmetrics.Get(ctx)
		//nolint:contextcheck // OnStore methods does not support context as a parameter
		configStore := config.NewStore(logger.Named("config-store"),
			pipelinerunmetrics.OnStore(logger, pipelinerunmetricsRecorder),
			tracerProvider.OnStore(secretinformer.Lister()),
		)
		configStore.WatchConfigs(cmw)

		c := &Reconciler{
			KubeClientSet:            kubeclientset,
			PipelineClientSet:        pipelineclientset,
			Images:                   opts.Images,
			Clock:                    clock,
			pipelineRunLister:        pipelineRunInformer.Lister(),
			taskRunLister:            taskRunInformer.Lister(),
			customRunLister:          customRunInformer.Lister(),
			verificationPolicyLister: verificationpolicyInformer.Lister(),
			cloudEventClient:         cloudeventclient.Get(ctx),
			metrics:                  pipelinerunmetricsRecorder,
			pvcHandler:               volumeclaim.NewPVCHandler(kubeclientset, logger),
			resolutionRequester:      resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()),
			tracerProvider:           tracerProvider,
		}
		impl := pipelinerunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {
			return controller.Options{
				AgentName:         pipeline.PipelineRunControllerName,
				ConfigStore:       configStore,
				PromoteFilterFunc: pipelineRunFilterManagedBy,
			}
		})

		// Secret changes feed the tracer provider (e.g. tracing credentials).
		if _, err := secretinformer.Informer().AddEventHandler(controller.HandleAll(tracerProvider.Handler)); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register Secret informer event handler: %w", err)
		}

		// Enqueue PipelineRuns directly, but only the ones this controller manages.
		if _, err := pipelineRunInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: pipelineRunFilterManagedBy,
			Handler:    controller.HandleAll(impl.Enqueue),
		}); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register PipelineRun informer event handler: %w", err)
		}

		// For child PipelineRuns owned by a PipelineRun, enqueue the owning PipelineRun instead.
		if _, err := pipelineRunInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: controller.FilterController(&v1.PipelineRun{}),
			Handler:    controller.HandleAll(impl.EnqueueControllerOf),
		}); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register PipelineRun informer event handler: %w", err)
		}

		// TaskRun, CustomRun and ResolutionRequest changes requeue the owning PipelineRun.
		if _, err := taskRunInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: controller.FilterController(&v1.PipelineRun{}),
			Handler:    controller.HandleAll(impl.EnqueueControllerOf),
		}); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register TaskRun informer event handler: %w", err)
		}
		if _, err := customRunInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: controller.FilterController(&v1.PipelineRun{}),
			Handler:    controller.HandleAll(impl.EnqueueControllerOf),
		}); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register CustomRun informer event handler: %w", err)
		}
		if _, err := resolutionInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: controller.FilterController(&v1.PipelineRun{}),
			Handler:    controller.HandleAll(impl.EnqueueControllerOf),
		}); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register ResolutionRequest informer event handler: %w", err)
		}

		return impl
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerun
import (
"context"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"reflect"
"regexp"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelinerunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun"
listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
alpha1listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
beta1listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
ctrl "github.com/tektoncd/pipeline/pkg/controller"
"github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
resolutionutil "github.com/tektoncd/pipeline/pkg/internal/resolution"
"github.com/tektoncd/pipeline/pkg/pipelinerunmetrics"
tknreconciler "github.com/tektoncd/pipeline/pkg/reconciler"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
"github.com/tektoncd/pipeline/pkg/reconciler/events"
"github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
rprp "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinespec"
"github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun"
tresources "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
"github.com/tektoncd/pipeline/pkg/remote"
resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/substitution"
"github.com/tektoncd/pipeline/pkg/trustedresources"
"github.com/tektoncd/pipeline/pkg/workspace"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
k8slabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
"knative.dev/pkg/controller"
"knative.dev/pkg/kmap"
"knative.dev/pkg/kmeta"
"knative.dev/pkg/logging"
pkgreconciler "knative.dev/pkg/reconciler"
)
// Aliased for backwards compatibility; do not add additional reasons here
var (
	// ReasonCouldntGetPipeline indicates that the reason for the failure status is that the
	// associated Pipeline couldn't be retrieved
	ReasonCouldntGetPipeline = v1.PipelineRunReasonCouldntGetPipeline.String()
	// ReasonInvalidBindings indicates that the reason for the failure status is that the
	// PipelineResources bound in the PipelineRun didn't match those declared in the Pipeline
	ReasonInvalidBindings = v1.PipelineRunReasonInvalidBindings.String()
	// ReasonInvalidWorkspaceBinding indicates that a Pipeline expects a workspace but a
	// PipelineRun has provided an invalid binding.
	ReasonInvalidWorkspaceBinding = v1.PipelineRunReasonInvalidWorkspaceBinding.String()
	// ReasonInvalidTaskRunSpec indicates that PipelineRun.Spec.TaskRunSpecs[].PipelineTaskName is defined with
	// a taskName that does not exist in pipelineSpec.
	ReasonInvalidTaskRunSpec = v1.PipelineRunReasonInvalidTaskRunSpec.String()
	// ReasonParameterTypeMismatch indicates that the reason for the failure status is that
	// parameter(s) declared in the PipelineRun do not have the same declared type as the
	// parameters(s) declared in the Pipeline that they are supposed to override.
	ReasonParameterTypeMismatch = v1.PipelineRunReasonParameterTypeMismatch.String()
	// ReasonObjectParameterMissKeys indicates that the object param value provided from PipelineRun spec
	// misses some keys required for the object param declared in Pipeline spec.
	ReasonObjectParameterMissKeys = v1.PipelineRunReasonObjectParameterMissKeys.String()
	// ReasonParamArrayIndexingInvalid indicates that the use of param array indexing is out of bound.
	ReasonParamArrayIndexingInvalid = v1.PipelineRunReasonParamArrayIndexingInvalid.String()
	// ReasonCouldntGetTask indicates that the reason for the failure status is that the
	// associated Pipeline's Tasks couldn't all be retrieved
	ReasonCouldntGetTask = v1.PipelineRunReasonCouldntGetTask.String()
	// ReasonParameterMissing indicates that the reason for the failure status is that the
	// associated PipelineRun didn't provide all the required parameters
	ReasonParameterMissing = v1.PipelineRunReasonParameterMissing.String()
	// ReasonFailedValidation indicates that the reason for failure status is
	// that pipelinerun failed runtime validation
	ReasonFailedValidation = v1.PipelineRunReasonFailedValidation.String()
	// ReasonInvalidGraph indicates that the reason for the failure status is that the
	// associated Pipeline is an invalid graph (a.k.a wrong order, cycle, …)
	ReasonInvalidGraph = v1.PipelineRunReasonInvalidGraph.String()
	// ReasonCancelled indicates that a PipelineRun was cancelled.
	ReasonCancelled = v1.PipelineRunReasonCancelled.String()
	// ReasonPending indicates that a PipelineRun is pending.
	ReasonPending = v1.PipelineRunReasonPending.String()
	// ReasonCouldntCancel indicates that a PipelineRun was cancelled but attempting to update
	// all of the running TaskRuns as cancelled failed.
	ReasonCouldntCancel = v1.PipelineRunReasonCouldntCancel.String()
	// ReasonCouldntTimeOut indicates that a PipelineRun was timed out but attempting to update
	// all of the running TaskRuns as timed out failed.
	ReasonCouldntTimeOut = v1.PipelineRunReasonCouldntTimeOut.String()
	// ReasonInvalidMatrixParameterTypes indicates a matrix contains invalid parameter types
	ReasonInvalidMatrixParameterTypes = v1.PipelineRunReasonInvalidMatrixParameterTypes.String()
	// ReasonInvalidTaskResultReference indicates a task result was declared
	// but was not initialized by that task
	ReasonInvalidTaskResultReference = v1.PipelineRunReasonInvalidTaskResultReference.String()
	// ReasonRequiredWorkspaceMarkedOptional indicates an optional workspace
	// has been passed to a Task that is expecting a non-optional workspace
	ReasonRequiredWorkspaceMarkedOptional = v1.PipelineRunReasonRequiredWorkspaceMarkedOptional.String()
	// ReasonResolvingPipelineRef indicates that the PipelineRun is waiting for
	// its pipelineRef to be asynchronously resolved.
	ReasonResolvingPipelineRef = v1.PipelineRunReasonResolvingPipelineRef.String()
	// ReasonResourceVerificationFailed indicates that the pipeline fails the trusted resource verification,
	// it could be the content has changed, signature is invalid or public key is invalid
	ReasonResourceVerificationFailed = v1.PipelineRunReasonResourceVerificationFailed.String()
	// ReasonCreateRunFailed indicates that the pipeline fails to create the taskrun or other run resources
	ReasonCreateRunFailed = v1.PipelineRunReasonCreateRunFailed.String()
)
// constants used as kind descriptors for various types of runs; these constants
// match their corresponding controller names. Given that it's odd to use a
// "ControllerName" const in describing the type of run, we import these
// constants (for consistency) but rename them (for ergonomic semantics).
const (
	// taskRun describes a run backed by a TaskRun object.
	taskRun = pipeline.TaskRunControllerName
	// customRun describes a run backed by a CustomRun object.
	customRun = pipeline.CustomRunControllerName
	// pipelineRun describes a run backed by a (child) PipelineRun object.
	pipelineRun = pipeline.PipelineRunControllerName
)
// Reconciler implements controller.Reconciler for PipelineRun resources.
type Reconciler struct {
	// KubeClientSet talks to the Kubernetes core APIs (pods, PVCs, etc.).
	KubeClientSet kubernetes.Interface
	// PipelineClientSet talks to the Tekton Pipelines APIs.
	PipelineClientSet clientset.Interface
	// Images holds the container image references the controller injects.
	Images pipeline.Images
	// Clock is injected for testability of time-dependent logic (timeouts, metrics).
	Clock clock.PassiveClock

	// listers index properties about resources
	pipelineRunLister        listers.PipelineRunLister
	taskRunLister            listers.TaskRunLister
	customRunLister          beta1listers.CustomRunLister
	verificationPolicyLister alpha1listers.VerificationPolicyLister

	// cloudEventClient emits CloudEvents for run lifecycle changes.
	cloudEventClient cloudevent.CEClient
	// metrics records PipelineRun duration/count metrics.
	metrics *pipelinerunmetrics.Recorder
	// pvcHandler creates/deletes workspace PVCs.
	pvcHandler volumeclaim.PvcHandler
	// resolutionRequester performs remote resolution of pipeline/task refs.
	resolutionRequester resolution.Requester
	// tracerProvider creates OpenTelemetry spans for reconcile phases.
	tracerProvider trace.TracerProvider
}
var (
	// Check that our Reconciler implements pipelinerunreconciler.Interface
	_ pipelinerunreconciler.Interface = (*Reconciler)(nil)

	// filterReservedAnnotationRegexp matches annotation keys reserved by Tekton
	// (per pipeline.TektonReservedAnnotationExpr); compiled once at package init.
	// NOTE(review): usage is outside this chunk — presumably filters annotations
	// during label/annotation propagation; confirm at call sites.
	filterReservedAnnotationRegexp = regexp.MustCompile(pipeline.TektonReservedAnnotationExpr)
)
// ReconcileKind compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Pipeline Run
// resource with the current status of the resource.
//
// The return value is a pkgreconciler.Event: a permanent error stops further
// reconciling, a requeue-after event reschedules this run before its next
// timeout, and nil lets the run settle until the next informer event.
func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1.PipelineRun) pkgreconciler.Event {
	logger := logging.FromContext(ctx)
	ctx = cloudevent.ToContext(ctx, c.cloudEventClient)
	ctx = initTracing(ctx, c.tracerProvider, pr)
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "PipelineRun:ReconcileKind")
	defer span.End()

	span.SetAttributes(
		attribute.String("pipelinerun", pr.Name), attribute.String("namespace", pr.Namespace),
	)

	// Read the initial condition so that status-change events can be emitted
	// against the state observed at the start of this reconcile.
	before := pr.Status.GetCondition(apis.ConditionSucceeded)

	// Check if we are failing to mark this as timed out for a while. If we are, mark immediately and finish the
	// reconcile. We are assuming here that if the PipelineRun has timed out for a long time, it had time to run
	// before and it kept failing. One reason that can happen is exceeding etcd request size limit. Finishing it early
	// makes sure the request size is manageable
	if !pr.IsDone() && pr.HasTimedOutForALongTime(ctx, c.Clock) && !pr.IsTimeoutConditionSet() {
		if err := timeoutPipelineRun(ctx, logger, pr, c.PipelineClientSet); err != nil {
			return err
		}
		if err := c.finishReconcileUpdateEmitEvents(ctx, pr, before, nil); err != nil {
			return err
		}
		// Permanent error: there is nothing more to do for a long-timed-out run.
		return controller.NewPermanentError(errors.New("PipelineRun has timed out for a long time"))
	}

	if !pr.HasStarted() && !pr.IsPending() {
		pr.Status.InitializeConditions(c.Clock)
		// In case node time was not synchronized, when controller has been scheduled to other nodes.
		if pr.Status.StartTime.Sub(pr.CreationTimestamp.Time) < 0 {
			logger.Warnf("PipelineRun %s createTimestamp %s is after the pipelineRun started %s", pr.GetNamespacedName().String(), pr.CreationTimestamp, pr.Status.StartTime)
			pr.Status.StartTime = &pr.CreationTimestamp
		}
		// Emit events. During the first reconcile the status of the PipelineRun may change twice
		// from not Started to Started and then to Running, so we need to send the event here
		// and at the end of 'Reconcile' again.
		// We also want to send the "Started" event as soon as possible for anyone who may be waiting
		// on the event to perform user facing initialisations, such as resetting a CI check status
		afterCondition := pr.Status.GetCondition(apis.ConditionSucceeded)
		events.Emit(ctx, nil, afterCondition, pr)

		// We already sent an event for start, so update `before` with the current status
		before = pr.Status.GetCondition(apis.ConditionSucceeded)
	}

	// list VerificationPolicies for trusted resources
	vp, err := c.verificationPolicyLister.VerificationPolicies(pr.Namespace).List(labels.Everything())
	if err != nil {
		return fmt.Errorf("failed to list VerificationPolicies from namespace %s with error %w", pr.Namespace, err)
	}
	getPipelineFunc := resources.GetPipelineFunc(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, pr, vp)

	if pr.IsDone() {
		pr.SetDefaults(ctx)
		// Best effort: log but do not fail the reconcile if cleanup of the
		// affinity assistant StatefulSet / workspace PVCs fails.
		err := c.cleanupAffinityAssistantsAndPVCs(ctx, pr)
		if err != nil {
			logger.Errorf("Failed to delete StatefulSet or PVC for PipelineRun %s: %v", pr.Name, err)
		}
		return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err)
	}

	if err := propagatePipelineNameLabelToPipelineRun(pr); err != nil {
		logger.Errorf("Failed to propagate pipeline name label to pipelinerun %s: %v", pr.Name, err)
		return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err)
	}

	// If the pipelinerun is cancelled, cancel tasks and update status
	if pr.IsCancelled() {
		err := cancelPipelineRun(ctx, logger, pr, c.PipelineClientSet)
		return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err)
	}

	// Make sure that the PipelineRun status is in sync with the actual TaskRuns
	err = c.updatePipelineRunStatusFromInformer(ctx, pr)
	if err != nil {
		// This should not fail. Return the error so we can re-try later.
		logger.Errorf("Error while syncing the pipelinerun status: %v", err.Error())
		return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err)
	}

	// Reconcile this copy of the pipelinerun and then write back any status or label
	// updates regardless of whether the reconciliation errored out.
	if err = c.reconcile(ctx, pr, getPipelineFunc, before); err != nil {
		logger.Errorf("Reconcile error: %v", err.Error())
	}

	if err = c.finishReconcileUpdateEmitEvents(ctx, pr, before, err); err != nil {
		return err
	}

	if pr.Status.StartTime != nil {
		// Compute the time since the task started.
		elapsed := c.Clock.Since(pr.Status.StartTime.Time)
		// Snooze this resource until the appropriate timeout has elapsed.
		// but if the timeout has been disabled by setting timeout to 0, we
		// do not want to subtract from 0, because a negative wait time will
		// result in the requeue happening essentially immediately
		timeout := pr.PipelineTimeout(ctx)
		taskTimeout := pr.TasksTimeout()
		waitTime := timeout - elapsed
		if timeout == config.NoTimeoutDuration {
			waitTime = time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute
		}
		// While the DAG (non-finally) phase runs, the tasks timeout is the next
		// deadline; once finally has started, the finally timeout may be sooner.
		if pr.Status.FinallyStartTime == nil && taskTimeout != nil {
			waitTime = pr.TasksTimeout().Duration - elapsed
			if taskTimeout.Duration == config.NoTimeoutDuration {
				waitTime = time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute
			}
		} else if pr.Status.FinallyStartTime != nil && pr.FinallyTimeout() != nil &&
			pr.FinallyTimeout().Duration != config.NoTimeoutDuration {
			finallyWaitTime := pr.FinallyTimeout().Duration - c.Clock.Since(pr.Status.FinallyStartTime.Time)
			if finallyWaitTime < waitTime {
				waitTime = finallyWaitTime
			}
		}
		return controller.NewRequeueAfter(waitTime)
	}
	return nil
}
// durationAndCountMetrics records duration/count metrics for a finished
// PipelineRun. Runs that are not done yet are skipped; a recording failure is
// only logged, never propagated.
func (c *Reconciler) durationAndCountMetrics(ctx context.Context, pr *v1.PipelineRun, beforeCondition *apis.Condition) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "durationAndCountMetrics")
	defer span.End()

	// Metrics are only meaningful once the run has completed.
	if !pr.IsDone() {
		return
	}
	if err := c.metrics.DurationAndCount(pr, beforeCondition); err != nil {
		logging.FromContext(ctx).Warnf("Failed to log the metrics : %v", err)
	}
}
// finishReconcileUpdateEmitEvents emits condition-change events for the run,
// writes back label/annotation updates, and folds any update failure into
// previousError. If previousError was permanent, the combined error is
// returned as permanent too, so the key is not requeued.
func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, pr *v1.PipelineRun, beforeCondition *apis.Condition, previousError error) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "finishReconcileUpdateEmitEvents")
	defer span.End()

	logger := logging.FromContext(ctx)

	// Emit an event for any change between the condition observed at the start
	// of the reconcile and the condition now.
	events.Emit(ctx, beforeCondition, pr.Status.GetCondition(apis.ConditionSucceeded), pr)

	_, updateErr := c.updateLabelsAndAnnotations(ctx, pr)
	if updateErr != nil {
		logger.Warn("Failed to update PipelineRun labels/annotations", zap.Error(updateErr))
		events.EmitError(controller.GetEventRecorder(ctx), updateErr, pr)
	}

	combined := errors.Join(previousError, updateErr)
	// Preserve the permanence of the original error: a permanent reconcile
	// error must stay permanent even after joining the update error.
	if controller.IsPermanentError(previousError) {
		return controller.NewPermanentError(combined)
	}
	return combined
}
// resolvePipelineState will attempt to resolve each referenced pipeline task in the pipeline's spec and all of the resources
// specified by those tasks.
//
// Resolved tasks are appended to pst and the grown state is returned. Transient
// and in-progress resolution errors are returned as-is (so the caller can
// requeue); unresolvable references mark the PipelineRun failed and return a
// permanent error.
func (c *Reconciler) resolvePipelineState(
	ctx context.Context,
	pipelineTasks []v1.PipelineTask,
	pipelineMeta *metav1.ObjectMeta,
	pr *v1.PipelineRun,
	pst resources.PipelineRunState,
) (resources.PipelineRunState, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "resolvePipelineState")
	defer span.End()

	// list VerificationPolicies for trusted resources. The list does not depend
	// on the individual pipeline task, so do it once instead of once per task.
	vp, err := c.verificationPolicyLister.VerificationPolicies(pr.Namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("failed to list VerificationPolicies from namespace %s with error %w", pr.Namespace, err)
	}

	// These lister-backed getters are loop-invariant as well; build them once.
	getChildPipelineRunFunc := func(name string) (*v1.PipelineRun, error) {
		return c.pipelineRunLister.PipelineRuns(pr.Namespace).Get(name)
	}
	getTaskRunFunc := func(name string) (*v1.TaskRun, error) {
		return c.taskRunLister.TaskRuns(pr.Namespace).Get(name)
	}
	getCustomRunFunc := func(name string) (*v1beta1.CustomRun, error) {
		return c.customRunLister.CustomRuns(pr.Namespace).Get(name)
	}

	// Resolve each pipeline task individually because they each could have a different reference context (remote or local).
	for _, pipelineTask := range pipelineTasks {
		// We need the TaskRun name to ensure that we don't perform an additional remote resolution request for a PipelineTask
		// in the TaskRun reconciler.
		trName := resources.GetTaskRunName(
			pr.Status.ChildReferences,
			pipelineTask.Name,
			pr.Name,
		)
		getTaskFunc := tresources.GetTaskFunc(
			ctx,
			c.KubeClientSet,
			c.PipelineClientSet,
			c.resolutionRequester,
			pr,
			pipelineTask.TaskRef,
			trName,
			pr.Namespace,
			pr.Spec.TaskRunTemplate.ServiceAccountName,
			vp,
		)
		resolvedTask, err := resources.ResolvePipelineTask(ctx,
			*pr,
			getChildPipelineRunFunc,
			getTaskFunc,
			getTaskRunFunc,
			getCustomRunFunc,
			pipelineTask,
			pst,
		)
		if err != nil {
			// Transient / in-progress errors are returned untouched so the
			// caller can decide to retry or wait for remote resolution.
			if resolutioncommon.IsErrTransient(err) {
				return nil, err
			}
			if errors.Is(err, remote.ErrRequestInProgress) {
				return nil, err
			}
			var nfErr *resources.TaskNotFoundError
			if errors.As(err, &nfErr) {
				pr.Status.MarkFailed(v1.PipelineRunReasonCouldntGetTask.String(),
					"Pipeline %s/%s can't be Run; it contains Tasks that don't exist: %s",
					pipelineMeta.Namespace, pipelineMeta.Name, nfErr)
			} else {
				pr.Status.MarkFailed(v1.PipelineRunReasonFailedValidation.String(),
					"PipelineRun %s/%s can't be Run; couldn't resolve all references: %s",
					pipelineMeta.Namespace, pr.Name, pipelineErrors.WrapUserError(err))
			}
			return nil, controller.NewPermanentError(err)
		}
		// Surface the trusted-resource verification outcome as a condition; a
		// failed verification is terminal for the run.
		if resolvedTask.ResolvedTask != nil && resolvedTask.ResolvedTask.VerificationResult != nil {
			cond, err := conditionFromVerificationResult(resolvedTask.ResolvedTask.VerificationResult, pr, pipelineTask.Name)
			pr.Status.SetCondition(cond)
			if err != nil {
				pr.Status.MarkFailed(v1.PipelineRunReasonResourceVerificationFailed.String(), err.Error())
				return nil, controller.NewPermanentError(err)
			}
		}
		pst = append(pst, resolvedTask)
	}
	return pst, nil
}
// reconcile performs the core reconciliation of a single PipelineRun: it
// resolves the Pipeline and all of its tasks, validates the resolved spec
// against the run, starts whatever work is schedulable, and computes the run's
// resulting condition. Errors wrapped in controller.NewPermanentError stop
// further reconciling of this run.
func (c *Reconciler) reconcile(ctx context.Context, pr *v1.PipelineRun, getPipelineFunc rprp.GetPipeline, beforeCondition *apis.Condition) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "reconcile")
	defer span.End()
	defer c.durationAndCountMetrics(ctx, pr, beforeCondition)
	logger := logging.FromContext(ctx)
	pr.SetDefaults(ctx)

	// When pipeline run is pending, return to avoid creating the task
	if pr.IsPending() {
		pr.Status.MarkRunning(v1.PipelineRunReasonPending.String(), fmt.Sprintf("PipelineRun %q is pending", pr.Name))
		return nil
	}

	pipelineMeta, pipelineSpec, err := rprp.GetPipelineData(ctx, pr, getPipelineFunc)
	switch {
	case errors.Is(err, remote.ErrRequestInProgress):
		message := fmt.Sprintf("PipelineRun %s/%s awaiting remote resource", pr.Namespace, pr.Name)
		pr.Status.MarkRunning(v1.PipelineRunReasonResolvingPipelineRef.String(), message)
		return nil
	case errors.Is(err, apiserver.ErrReferencedObjectValidationFailed), errors.Is(err, apiserver.ErrCouldntValidateObjectPermanent):
		logger.Errorf("Failed dryRunValidation for PipelineRun %s: %v", pr.Name, err)
		pr.Status.MarkFailed(v1.PipelineRunReasonFailedValidation.String(),
			"Failed dryRunValidation for PipelineRun %s: %s",
			pr.Name, pipelineErrors.WrapUserError(err))
		return controller.NewPermanentError(err)
	case errors.Is(err, apiserver.ErrCouldntValidateObjectRetryable):
		return err
	case err != nil:
		logger.Errorf("Failed to determine Pipeline spec to use for pipelinerun %s: %v", pr.Name, err)
		pr.Status.MarkFailed(v1.PipelineRunReasonCouldntGetPipeline.String(),
			"Error retrieving pipeline for pipelinerun %s/%s: %s",
			pr.Namespace, pr.Name, err)
		return controller.NewPermanentError(err)
	default:
		// Store the fetched PipelineSpec on the PipelineRun for auditing
		if err := storePipelineSpecAndMergeMeta(ctx, pr, pipelineSpec, pipelineMeta); err != nil {
			logger.Errorf("Failed to store PipelineSpec on PipelineRun.Status for pipelinerun %s: %v", pr.Name, err)
		}
	}

	// Surface the trusted-resource verification outcome of the Pipeline itself.
	if pipelineMeta.VerificationResult != nil {
		cond, err := conditionFromVerificationResult(pipelineMeta.VerificationResult, pr, pipelineMeta.Name)
		pr.Status.SetCondition(cond)
		if err != nil {
			pr.Status.MarkFailed(v1.PipelineRunReasonResourceVerificationFailed.String(), err.Error())
			return controller.NewPermanentError(err)
		}
	}

	d, err := dag.Build(v1.PipelineTaskList(pipelineSpec.Tasks), v1.PipelineTaskList(pipelineSpec.Tasks).Deps())
	if err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonInvalidGraph.String(),
			"PipelineRun %s/%s's Pipeline DAG is invalid: %s",
			pr.Namespace, pr.Name, pipelineErrors.WrapUserError(err))
		return controller.NewPermanentError(err)
	}

	// build DAG with a list of final tasks, this DAG is used later to identify
	// if a task in PipelineRunState is final task or not
	// the finally section is optional and might not exist
	// dfinally holds an empty Graph in the absence of finally clause
	dfinally, err := dag.Build(v1.PipelineTaskList(pipelineSpec.Finally), map[string][]string{})
	if err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonInvalidGraph.String(),
			"PipelineRun %s/%s's Pipeline DAG is invalid for finally clause: %s",
			pr.Namespace, pr.Name, pipelineErrors.WrapUserError(err))
		return controller.NewPermanentError(err)
	}

	if err := pipelineSpec.Validate(ctx); err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonFailedValidation.String(),
			"Pipeline %s/%s can't be Run; it has an invalid spec: %s",
			pipelineMeta.Namespace, pipelineMeta.Name, pipelineErrors.WrapUserError(err))
		return controller.NewPermanentError(err)
	}

	// Ensure that the PipelineRun provides all the parameters required by the Pipeline
	if err := resources.ValidateRequiredParametersProvided(&pipelineSpec.Params, &pr.Spec.Params); err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonParameterMissing.String(),
			"PipelineRun %s/%s is missing some parameters required by Pipeline %s/%s: %s",
			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
		return controller.NewPermanentError(err)
	}

	// Ensure that the parameters from the PipelineRun are overriding Pipeline parameters with the same type.
	// Weird substitution issues can occur if this is not validated (ApplyParameters() does not verify type).
	if err = resources.ValidateParamTypesMatching(pipelineSpec, pr); err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonParameterTypeMismatch.String(),
			"PipelineRun %s/%s parameters have mismatching types with Pipeline %s/%s's parameters: %s",
			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
		return controller.NewPermanentError(err)
	}

	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableParamEnum {
		if err := taskrun.ValidateEnumParam(ctx, pr.Spec.Params, pipelineSpec.Params); err != nil {
			logger.Errorf("PipelineRun %q Param Enum validation failed: %v", pr.Name, err)
			pr.Status.MarkFailed(v1.PipelineRunReasonInvalidParamValue.String(),
				"PipelineRun %s/%s parameters have invalid value: %s",
				pr.Namespace, pr.Name, pipelineErrors.WrapUserError(err))
			return controller.NewPermanentError(err)
		}
	}

	// Ensure that the keys of an object param declared in PipelineSpec are not missed in the PipelineRunSpec
	if err = resources.ValidateObjectParamRequiredKeys(pipelineSpec.Params, pr.Spec.Params); err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonObjectParameterMissKeys.String(),
			"PipelineRun %s/%s parameters is missing object keys required by Pipeline %s/%s's parameters: %s",
			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
		return controller.NewPermanentError(err)
	}

	// Ensure that the array reference is not out of bound
	if err := resources.ValidateParamArrayIndex(pipelineSpec, pr.Spec.Params); err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonParamArrayIndexingInvalid.String(),
			"PipelineRun %s/%s failed validation: failed to validate Pipeline %s/%s's parameter which has an invalid index while referring to an array: %s",
			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
		return controller.NewPermanentError(err)
	}

	// Ensure that the workspaces expected by the Pipeline are provided by the PipelineRun.
	if err := resources.ValidateWorkspaceBindings(pipelineSpec, pr); err != nil {
		pr.Status.MarkFailed(v1.PipelineRunReasonInvalidWorkspaceBinding.String(),
			"PipelineRun %s/%s doesn't bind Pipeline %s/%s's Workspaces correctly: %s",
			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
		return controller.NewPermanentError(err)
	}

	// Ensure that the TaskRunSpecs defined are correct.
	if err := resources.ValidateTaskRunSpecs(pipelineSpec, pr); err != nil {
		pr.Status.MarkFailed(v1.PipelineRunReasonInvalidTaskRunSpec.String(),
			"PipelineRun %s/%s doesn't define taskRunSpecs correctly: %s",
			pr.Namespace, pr.Name, err)
		return controller.NewPermanentError(err)
	}

	resources.ApplyParametersToWorkspaceBindings(ctx, pr)
	// Make a deep copy of the Pipeline and its Tasks before value substitution.
	// This is used to find referenced pipeline-level params at each PipelineTask when validate param enum subset requirement
	originalPipeline := pipelineSpec.DeepCopy()
	originalTasks := originalPipeline.Tasks
	originalTasks = append(originalTasks, originalPipeline.Finally...)

	// Apply parameter substitution from the PipelineRun
	pipelineSpec = resources.ApplyParameters(ctx, pipelineSpec, pr)
	pipelineSpec = resources.ApplyContexts(pipelineSpec, pipelineMeta.Name, pr)
	pipelineSpec = resources.ApplyWorkspaces(pipelineSpec, pr)
	// Update pipelinespec of pipelinerun's status field
	pr.Status.PipelineSpec = pipelineSpec

	// validate pipelineSpec after apply parameters
	if err := validatePipelineSpecAfterApplyParameters(ctx, pipelineSpec); err != nil {
		// This Run has failed, so we need to mark it as failed and stop reconciling it
		pr.Status.MarkFailed(v1.PipelineRunReasonFailedValidation.String(),
			"Pipeline %s/%s can't be Run; it has an invalid spec: %s",
			pipelineMeta.Namespace, pipelineMeta.Name, pipelineErrors.WrapUserError(err))
		return controller.NewPermanentError(err)
	}

	// pipelineRunState holds a list of pipeline tasks after fetching their resolved Task specs.
	// pipelineRunState also holds a taskRun for each pipeline task after the taskRun is created
	// pipelineRunState is instantiated and updated on every reconcile cycle
	// Resolve the set of tasks (and possibly task runs).
	tasks := pipelineSpec.Tasks
	if len(pipelineSpec.Finally) > 0 {
		tasks = append(tasks, pipelineSpec.Finally...)
	}

	// We split tasks in two lists:
	// - those with a completed (Task|Custom)Run reference (i.e. those that finished running)
	// - those without a (Task|Custom)Run reference
	// We resolve the status for the former first, to collect all results available at this stage
	// We know that tasks in progress or completed have had their fan-out already calculated so
	// they can be safely processed in the first iteration. The underlying assumption is that if
	// a PipelineTask has at least one TaskRun associated, then all its TaskRuns have been
	// created already.
	// The second group takes as input the partial state built in the first iteration and finally
	// the two results are collated
	ranOrRunningTaskNames := sets.Set[string]{}
	ranOrRunningTasks := []v1.PipelineTask{}
	notStartedTasks := []v1.PipelineTask{}

	for _, child := range pr.Status.ChildReferences {
		ranOrRunningTaskNames.Insert(child.PipelineTaskName)
	}
	for _, task := range tasks {
		if ranOrRunningTaskNames.Has(task.Name) {
			ranOrRunningTasks = append(ranOrRunningTasks, task)
		} else {
			notStartedTasks = append(notStartedTasks, task)
		}
	}
	// First iteration
	pipelineRunState, err := c.resolvePipelineState(ctx, ranOrRunningTasks, pipelineMeta.ObjectMeta, pr, resources.PipelineRunState{})
	switch {
	case errors.Is(err, remote.ErrRequestInProgress):
		message := fmt.Sprintf("PipelineRun %s/%s awaiting remote resource", pr.Namespace, pr.Name)
		pr.Status.MarkRunning(v1.TaskRunReasonResolvingTaskRef, message)
		return nil
	case err != nil:
		return err
	default:
	}
	// Second iteration
	pipelineRunState, err = c.resolvePipelineState(ctx, notStartedTasks, pipelineMeta.ObjectMeta, pr, pipelineRunState)
	switch {
	case errors.Is(err, remote.ErrRequestInProgress):
		message := fmt.Sprintf("PipelineRun %s/%s awaiting remote resource", pr.Namespace, pr.Name)
		pr.Status.MarkRunning(v1.TaskRunReasonResolvingTaskRef, message)
		return nil
	case err != nil:
		return err
	default:
	}

	// Build PipelineRunFacts with a list of resolved pipeline tasks,
	// dag tasks graph and final tasks graph
	pipelineRunFacts := &resources.PipelineRunFacts{
		State:           pipelineRunState,
		SpecStatus:      pr.Spec.Status,
		TasksGraph:      d,
		FinalTasksGraph: dfinally,
		TimeoutsState: resources.PipelineRunTimeoutsState{
			Clock: c.Clock,
		},
	}
	if pr.Status.StartTime != nil {
		pipelineRunFacts.TimeoutsState.StartTime = &pr.Status.StartTime.Time
	}
	if pr.Status.FinallyStartTime != nil {
		pipelineRunFacts.TimeoutsState.FinallyStartTime = &pr.Status.FinallyStartTime.Time
	}
	tasksTimeout := pr.TasksTimeout()
	if tasksTimeout != nil {
		pipelineRunFacts.TimeoutsState.TasksTimeout = &tasksTimeout.Duration
	}
	finallyTimeout := pr.FinallyTimeout()
	if finallyTimeout != nil {
		pipelineRunFacts.TimeoutsState.FinallyTimeout = &finallyTimeout.Duration
	}
	if pipelineTimeout := pr.PipelineTimeout(ctx); pipelineTimeout != 0 {
		pipelineRunFacts.TimeoutsState.PipelineTimeout = &pipelineTimeout
	}

	// Index the pre-substitution tasks by name. pipelineRunFacts.State is ordered
	// with already-started tasks first (see the two resolvePipelineState calls
	// above), which generally differs from the spec order of originalTasks, so
	// positional indexing into originalTasks would compare the wrong task's
	// params whenever the two orders diverge.
	originalTasksByName := make(map[string]v1.PipelineTask, len(originalTasks))
	for _, ot := range originalTasks {
		originalTasksByName[ot.Name] = ot
	}
	for _, rpt := range pipelineRunFacts.State {
		// Task?
		if !rpt.IsCustomTask() && !rpt.IsChildPipeline() {
			err := taskrun.ValidateResolvedTask(ctx, rpt.PipelineTask.Params, rpt.PipelineTask.Matrix, rpt.ResolvedTask)
			if err != nil {
				logger.Errorf("Failed to validate pipelinerun %s with error %v", pr.Name, err)
				pr.Status.MarkFailed(v1.PipelineRunReasonFailedValidation.String(),
					"Validation failed for pipelinerun %s with error %s",
					pr.Name, pipelineErrors.WrapUserError(err))
				return controller.NewPermanentError(err)
			}

			if config.FromContextOrDefaults(ctx).FeatureFlags.EnableParamEnum {
				if err := resources.ValidateParamEnumSubset(originalTasksByName[rpt.PipelineTask.Name].Params, pipelineSpec.Params, rpt.ResolvedTask); err != nil {
					logger.Errorf("Failed to validate pipelinerun %q with error %v", pr.Name, err)
					pr.Status.MarkFailed(v1.PipelineRunReasonFailedValidation.String(),
						"Validation failed for pipelinerun with error %s",
						pipelineErrors.WrapUserError(err))
					return controller.NewPermanentError(err)
				}
			}
		}
	}

	// Evaluate the CEL of PipelineTask after the variable substitutions and validations.
	for _, rpt := range pipelineRunFacts.State {
		err := rpt.EvaluateCEL()
		if err != nil {
			logger.Errorf("Error evaluating CEL %s: %v", pr.Name, err)
			pr.Status.MarkFailed(string(v1.PipelineRunReasonCELEvaluationFailed),
				"Error evaluating CEL %s: %v", pr.Name, pipelineErrors.WrapUserError(err))
			return controller.NewPermanentError(err)
		}
	}

	// check if pipeline run is gracefully cancelled and there are active pipeline task runs, which require cancelling
	if pr.IsGracefullyCancelled() && pipelineRunFacts.IsRunning() {
		// If the pipelinerun is cancelled, cancel tasks, but run finally
		err := gracefullyCancelPipelineRun(ctx, logger, pr, c.PipelineClientSet)
		if err != nil {
			// failed to cancel tasks, maybe retry would help (don't return permanent error)
			return err
		}
	}

	if pipelineRunFacts.State.IsBeforeFirstTaskRun() {
		if err := resources.ValidatePipelineTaskResults(pipelineRunFacts.State); err != nil {
			logger.Errorf("Failed to resolve task result reference for %q with error %v", pr.Name, err)
			pr.Status.MarkFailed(v1.PipelineRunReasonInvalidTaskResultReference.String(), err.Error())
			return controller.NewPermanentError(err)
		}

		if err := resources.ValidatePipelineResults(pipelineSpec, pipelineRunFacts.State); err != nil {
			logger.Errorf("Failed to resolve pipeline result reference for %q with error %v", pr.Name, err)
			pr.Status.MarkFailed(v1.PipelineRunReasonInvalidPipelineResultReference.String(),
				"Failed to resolve pipeline result reference for %q with error %v",
				pr.Name, err)
			return controller.NewPermanentError(err)
		}

		if err := resources.ValidateOptionalWorkspaces(pipelineSpec.Workspaces, pipelineRunFacts.State); err != nil {
			logger.Errorf("Optional workspace not supported by task: %v", err)
			pr.Status.MarkFailed(v1.PipelineRunReasonRequiredWorkspaceMarkedOptional.String(),
				"Optional workspace not supported by task: %v", pipelineErrors.WrapUserError(err))
			return controller.NewPermanentError(err)
		}

		aaBehavior, err := affinityassistant.GetAffinityAssistantBehavior(ctx)
		if err != nil {
			return controller.NewPermanentError(err)
		}
		if err := c.createOrUpdateAffinityAssistantsAndPVCs(ctx, pr, aaBehavior); err != nil {
			switch {
			case errors.Is(err, volumeclaim.ErrPvcCreationFailed):
				logger.Errorf("Failed to create PVC for PipelineRun %s: %v", pr.Name, err)
				pr.Status.MarkFailed(volumeclaim.ReasonCouldntCreateWorkspacePVC,
					"Failed to create PVC for PipelineRun %s/%s correctly: %s",
					pr.Namespace, pr.Name, err)
			case errors.Is(err, volumeclaim.ErrPvcCreationFailedRetryable):
				logger.Errorf("Failed to create PVC for PipelineRun %s: %v", pr.Name, err)
				pr.Status.MarkRunning(ReasonPending, "Waiting for PVC creation to succeed: %v", err)
				return err // not a permanent error, will requeue
			case errors.Is(err, ErrAffinityAssistantCreationFailed):
				logger.Errorf("Failed to create affinity assistant StatefulSet for PipelineRun %s: %v", pr.Name, err)
				pr.Status.MarkFailed(ReasonCouldntCreateOrUpdateAffinityAssistantStatefulSet,
					"Failed to create StatefulSet for PipelineRun %s/%s correctly: %s",
					pr.Namespace, pr.Name, err)
			default:
				logger.Errorf("default error handling for PipelineRun %s: %v", pr.Name, err)
			}
			return controller.NewPermanentError(err)
		}
	}

	if pr.Status.FinallyStartTime == nil {
		if pr.HaveTasksTimedOut(ctx, c.Clock) {
			tasksToTimeOut := sets.NewString()
			for _, pt := range pipelineRunFacts.State {
				if !pt.IsFinalTask(pipelineRunFacts) && pt.IsRunning() {
					tasksToTimeOut.Insert(pt.PipelineTask.Name)
				}
			}
			if tasksToTimeOut.Len() > 0 {
				logger.Debugf("PipelineRun tasks timeout of %s reached, cancelling tasks", tasksTimeout)
				errs := timeoutPipelineTasksForTaskNames(ctx, logger, pr, c.PipelineClientSet, tasksToTimeOut)
				if len(errs) > 0 {
					errString := strings.Join(errs, "\n")
					logger.Errorf("Failed to timeout tasks for PipelineRun %s/%s: %s", pr.Namespace, pr.Name, errString)
					return fmt.Errorf("error(s) from cancelling TaskRun(s) from PipelineRun %s: %s", pr.Name, errString)
				}
			}
		}
	} else if pr.HasFinallyTimedOut(ctx, c.Clock) {
		tasksToTimeOut := sets.NewString()
		for _, pt := range pipelineRunFacts.State {
			if pt.IsFinalTask(pipelineRunFacts) && pt.IsRunning() {
				tasksToTimeOut.Insert(pt.PipelineTask.Name)
			}
		}
		if tasksToTimeOut.Len() > 0 {
			logger.Debugf("PipelineRun finally timeout of %s reached, cancelling finally tasks", finallyTimeout)
			errs := timeoutPipelineTasksForTaskNames(ctx, logger, pr, c.PipelineClientSet, tasksToTimeOut)
			if len(errs) > 0 {
				errString := strings.Join(errs, "\n")
				logger.Errorf("Failed to timeout finally tasks for PipelineRun %s/%s: %s", pr.Namespace, pr.Name, errString)
				return fmt.Errorf("error(s) from cancelling TaskRun(s) from PipelineRun %s: %s", pr.Name, errString)
			}
		}
	}

	if err := c.runNextSchedulableTask(ctx, pr, pipelineRunFacts); err != nil {
		return err
	}

	// Reset the skipped status to trigger recalculation
	pipelineRunFacts.ResetSkippedCache()

	// If the pipelinerun has timed out, mark tasks as timed out and update status
	if pr.HasTimedOut(ctx, c.Clock) {
		if err := timeoutPipelineRun(ctx, logger, pr, c.PipelineClientSet); err != nil {
			return err
		}
	}

	after := pipelineRunFacts.GetPipelineConditionStatus(ctx, pr, logger, c.Clock)
	switch after.Status {
	case corev1.ConditionTrue:
		pr.Status.MarkSucceeded(after.Reason, after.Message)
	case corev1.ConditionFalse:
		pr.Status.MarkFailed(after.Reason, after.Message)
	case corev1.ConditionUnknown:
		pr.Status.MarkRunning(after.Reason, after.Message)
	}
	// Read the condition the way it was set by the Mark* helpers
	after = pr.Status.GetCondition(apis.ConditionSucceeded)
	pr.Status.StartTime = pipelineRunFacts.State.AdjustStartTime(pr.Status.StartTime)

	pr.Status.ChildReferences = pipelineRunFacts.GetChildReferences()

	pr.Status.SkippedTasks = pipelineRunFacts.GetSkippedTasks()
	pipelineTaskStatus := pipelineRunFacts.GetPipelineTaskStatus()
	finalPipelineTaskStatus := pipelineRunFacts.GetPipelineFinalTaskStatus()
	pipelineTaskStatus = kmap.Union(pipelineTaskStatus, finalPipelineTaskStatus)
	if after.Status == corev1.ConditionTrue || after.Status == corev1.ConditionFalse {
		pr.Status.Results, err = resources.ApplyTaskResultsToPipelineResults(
			ctx,
			pipelineSpec.Results,
			pipelineRunFacts.State.GetTaskRunsResults(),
			pipelineRunFacts.State.GetRunsResults(),
			pipelineTaskStatus,
		)
		if err != nil {
			pr.Status.MarkFailed(v1.PipelineRunReasonCouldntGetPipelineResult.String(),
				"Failed to get PipelineResult from TaskRun Results for PipelineRun %s: %s",
				pr.Name, err)
			return err
		}
	}

	logger.Infof("PipelineRun %s status is being set to %s", pr.Name, after)
	return nil
}
// runNextSchedulableTask gets the next schedulable Tasks from the dag based on the current
// pipeline run state, and starts them.
// After all DAG tasks are done, it's responsible for scheduling final tasks and starting
// to execute them. Returns a permanent error for failures that cannot be recovered by
// retrying the reconcile (validation, CEL evaluation, artifact propagation).
func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1.PipelineRun, pipelineRunFacts *resources.PipelineRunFacts) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "runNextSchedulableTask")
	defer span.End()
	logger := logging.FromContext(ctx)
	recorder := controller.GetEventRecorder(ctx)
	// nextRpts holds a list of pipeline tasks which should be executed next
	nextRpts, err := pipelineRunFacts.DAGExecutionQueue()
	if err != nil {
		logger.Errorf("Error getting potential next tasks for valid pipelinerun %s: %v", pr.Name, err)
		return controller.NewPermanentError(err)
	}
	for _, rpt := range nextRpts {
		// Check for Missing Result References
		// if error found, present rpt will be
		// added to the validationFailedTask list
		err := resources.CheckMissingResultReferences(pipelineRunFacts.State, rpt)
		if err != nil {
			logger.Infof("Failed to resolve task result reference for %q with error %v", pr.Name, err)
			// If there is an error encountered, no new task
			// will be scheduled, hence nextRpts should be empty
			// If finally tasks are found, then those tasks will
			// be added to the nextRpts.
			// (range iterates its original snapshot, so clearing nextRpts here does not
			// stop the loop: every failing task still gets recorded below.)
			nextRpts = nil
			logger.Infof("Adding the task %q to the validation failed list", rpt.ResolvedTask)
			pipelineRunFacts.ValidationFailedTask = append(pipelineRunFacts.ValidationFailedTask, rpt)
		}
	}
	// GetFinalTasks only returns final tasks when a DAG is complete
	fNextRpts := pipelineRunFacts.GetFinalTasks()
	if len(fNextRpts) != 0 {
		// apply the runtime context just before creating taskRuns for final tasks in queue
		resources.ApplyPipelineTaskStateContext(fNextRpts, pipelineRunFacts.GetPipelineTaskStatus())
		// Before creating TaskRun for scheduled final task, check if it's consuming a task result
		// Resolve and apply task result wherever applicable, report warning in case resolution fails
		for _, rpt := range fNextRpts {
			resolvedResultRefs, _, err := resources.ResolveResultRef(pipelineRunFacts.State, rpt)
			if err != nil {
				// Best-effort: a final task whose result refs can't be resolved is simply
				// not queued; the PipelineRun itself is not failed here.
				logger.Infof("Final task %q is not executed as it could not resolve task params for %q: %v", rpt.PipelineTask.Name, pr.Name, err)
				continue
			}
			resources.ApplyTaskResults(resources.PipelineRunState{rpt}, resolvedResultRefs)
			if err := rpt.EvaluateCEL(); err != nil {
				logger.Errorf("Final task %q is not executed, due to error evaluating CEL %s: %v", rpt.PipelineTask.Name, pr.Name, err)
				pr.Status.MarkFailed(string(v1.PipelineRunReasonCELEvaluationFailed),
					"Error evaluating CEL %s: %v", pr.Name, pipelineErrors.WrapUserError(err))
				return controller.NewPermanentError(err)
			}
			nextRpts = append(nextRpts, rpt)
		}
	}
	// If FinallyStartTime is not set, and one or more final tasks has been created
	// Try to set the FinallyStartTime of this PipelineRun
	if pr.Status.FinallyStartTime == nil && pipelineRunFacts.IsFinalTaskStarted() {
		c.setFinallyStartedTimeIfNeeded(pr, pipelineRunFacts)
	}
	resources.ApplyResultsToWorkspaceBindings(pipelineRunFacts.State.GetTaskRunsResults(), pr)
	for _, rpt := range nextRpts {
		if rpt.IsFinalTask(pipelineRunFacts) {
			c.setFinallyStartedTimeIfNeeded(pr, pipelineRunFacts)
		}
		// NOTE(review): this nil guard runs after rpt.IsFinalTask was already called
		// above; a nil entry would have panicked there first.
		if rpt == nil || rpt.Skip(pipelineRunFacts).IsSkipped || rpt.IsFinallySkipped(pipelineRunFacts).IsSkipped {
			continue
		}
		// propagate previous task results
		resources.PropagateResults(rpt, pipelineRunFacts.State)
		// propagate previous task artifacts
		err = resources.PropagateArtifacts(rpt, pipelineRunFacts.State)
		if err != nil {
			logger.Errorf("Failed to propagate artifacts due to error: %v", err)
			return controller.NewPermanentError(err)
		}
		// Validate parameter types in matrix after apply substitutions from Task Results
		if rpt.PipelineTask.IsMatrixed() {
			if err := resources.ValidateParameterTypesInMatrix(pipelineRunFacts.State); err != nil {
				logger.Errorf("Failed to validate matrix %q with error %v", pr.Name, err)
				// Supply pr.Name for the %q verb: the message format has two verbs, and
				// previously only the wrapped error was passed, producing a malformed message.
				pr.Status.MarkFailed(v1.PipelineRunReasonInvalidMatrixParameterTypes.String(),
					"Failed to validate matrix %q with error %v", pr.Name, pipelineErrors.WrapUserError(err))
				return controller.NewPermanentError(err)
			}
		}
		// Dispatch on the kind of child resource the pipeline task produces.
		switch {
		case rpt.IsChildPipeline():
			rpt.ChildPipelineRuns, err = c.createChildPipelineRuns(ctx, rpt, pr, pipelineRunFacts)
			if err != nil {
				recorder.Eventf(pr, corev1.EventTypeWarning, "ChildPipelineRunsCreationFailed", "Failed to create child (PIP) PipelineRuns %q: %v", rpt.ChildPipelineRunNames, err)
				err = fmt.Errorf("error creating child PipelineRuns called %s for PipelineTask %s from PipelineRun %s: %w", rpt.ChildPipelineRunNames, rpt.PipelineTask.Name, pr.Name, err)
				return err
			}
		case rpt.IsCustomTask():
			rpt.CustomRuns, err = c.createCustomRuns(ctx, rpt, pr, pipelineRunFacts)
			if err != nil {
				recorder.Eventf(pr, corev1.EventTypeWarning, "RunsCreationFailed", "Failed to create CustomRuns %q: %v", rpt.CustomRunNames, err)
				err = fmt.Errorf("error creating CustomRuns called %s for PipelineTask %s from PipelineRun %s: %w", rpt.CustomRunNames, rpt.PipelineTask.Name, pr.Name, err)
				return err
			}
		default:
			rpt.TaskRuns, err = c.createTaskRuns(ctx, rpt, pr, pipelineRunFacts)
			if err != nil {
				recorder.Eventf(pr, corev1.EventTypeWarning, "TaskRunsCreationFailed", "Failed to create TaskRuns %q: %v", rpt.TaskRunNames, err)
				err = fmt.Errorf("error creating TaskRuns called %s for PipelineTask %s from PipelineRun %s: %w", rpt.TaskRunNames, rpt.PipelineTask.Name, pr.Name, err)
				return err
			}
		}
	}
	return nil
}
// setFinallyStartedTimeIfNeeded records when the finally branch began executing.
// It sets pr.Status.FinallyStartTime from the controller clock when it is unset,
// and mirrors that value into the facts' timeout state when that is unset too.
func (c *Reconciler) setFinallyStartedTimeIfNeeded(pr *v1.PipelineRun, facts *resources.PipelineRunFacts) {
	if pr.Status.FinallyStartTime == nil {
		now := metav1.Time{Time: c.Clock.Now()}
		pr.Status.FinallyStartTime = &now
	}
	if facts.TimeoutsState.FinallyStartTime == nil {
		facts.TimeoutsState.FinallyStartTime = &pr.Status.FinallyStartTime.Time
	}
}
// createChildPipelineRuns creates one child (pipeline-in-pipeline) PipelineRun per
// generated child name on the resolved pipeline task. The first creation failure is
// routed through handleRunCreationError and aborts the whole batch.
func (c *Reconciler) createChildPipelineRuns(
	ctx context.Context,
	rpt *resources.ResolvedPipelineTask,
	pr *v1.PipelineRun,
	facts *resources.PipelineRunFacts,
) ([]*v1.PipelineRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "createChildPipelineRuns")
	defer span.End()
	var created []*v1.PipelineRun
	for _, name := range rpt.ChildPipelineRunNames {
		// No params are computed for child PipelineRuns here; an empty list is passed.
		var params v1.Params
		run, err := c.createChildPipelineRun(ctx, name, params, rpt, pr, facts)
		if err != nil {
			return nil, c.handleRunCreationError(pr, err)
		}
		created = append(created, run)
	}
	return created, nil
}
// createChildPipelineRun builds and submits a single child (pipeline-in-pipeline)
// PipelineRun owned by pr, carrying the child-resource labels/annotations and the
// pipeline task's embedded PipelineSpec. The params argument is currently unused.
func (c *Reconciler) createChildPipelineRun(
	ctx context.Context,
	childPipelineRunName string,
	params v1.Params,
	rpt *resources.ResolvedPipelineTask,
	pr *v1.PipelineRun,
	facts *resources.PipelineRunFacts,
) (*v1.PipelineRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "createChildPipelineRun")
	defer span.End()
	logger := logging.FromContext(ctx)
	// Resolve pipeline-task context references before copying the spec.
	rpt.PipelineTask = resources.ApplyPipelineTaskContexts(rpt.PipelineTask, pr.Status, facts)
	meta := metav1.ObjectMeta{
		Name:            childPipelineRunName,
		Namespace:       pr.Namespace,
		OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pr)},
		Labels:          createChildResourceLabels(pr, rpt.PipelineTask.Name, true),
		Annotations:     createChildResourceAnnotations(pr),
	}
	childRun := &v1.PipelineRun{
		ObjectMeta: meta,
		Spec: v1.PipelineRunSpec{
			PipelineSpec: rpt.PipelineTask.PipelineSpec,
		},
	}
	logger.Infof(
		"Creating a new child (PIP) PipelineRun object %s for pipeline task %s",
		childPipelineRunName,
		rpt.PipelineTask.Name,
	)
	return c.PipelineClientSet.TektonV1().
		PipelineRuns(pr.Namespace).
		Create(ctx, childRun, metav1.CreateOptions{})
}
// createTaskRuns creates one TaskRun per generated TaskRun name on the resolved
// pipeline task, fanning out matrix combinations when the task is matrixed. When the
// enable-param-enum feature flag is on, every combination is validated against the
// resolved Task's enum declarations first. The first creation failure is routed
// through handleRunCreationError and aborts the batch.
func (c *Reconciler) createTaskRuns(ctx context.Context, rpt *resources.ResolvedPipelineTask, pr *v1.PipelineRun, facts *resources.PipelineRunFacts) ([]*v1.TaskRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "createTaskRuns")
	defer span.End()
	var combos []v1.Params
	if rpt.PipelineTask.IsMatrixed() {
		combos = rpt.PipelineTask.Matrix.FanOut()
	}
	// validate the param values meet resolved Task Param Enum requirements before creating TaskRuns
	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableParamEnum {
		for i := range rpt.TaskRunNames {
			var ps v1.Params
			if i < len(combos) {
				ps = combos[i]
			}
			ps = append(ps, rpt.PipelineTask.Params...)
			if err := taskrun.ValidateEnumParam(ctx, ps, rpt.ResolvedTask.TaskSpec.Params); err != nil {
				pr.Status.MarkFailed(v1.PipelineRunReasonInvalidParamValue.String(),
					"Invalid param value from PipelineTask \"%s\": %v",
					rpt.PipelineTask.Name, pipelineErrors.WrapUserError(err))
				return nil, controller.NewPermanentError(err)
			}
		}
	}
	var taskRuns []*v1.TaskRun
	for i, name := range rpt.TaskRunNames {
		var ps v1.Params
		if i < len(combos) {
			ps = combos[i]
		}
		tr, err := c.createTaskRun(ctx, name, ps, rpt, pr, facts)
		if err != nil {
			return nil, c.handleRunCreationError(pr, err)
		}
		taskRuns = append(taskRuns, tr)
	}
	return taskRuns, nil
}
// createTaskRun builds and submits a single TaskRun for the given resolved pipeline
// task: it applies pipeline-task context, merges matrix/task params, copies the
// per-task TaskRunSpec (service account, pod template, compute resources), resolves
// timeouts and workspaces, and adds tracing / affinity-assistant annotations. When
// the wait-exponential-backoff feature flag is enabled, creation is retried on
// webhook timeouts with exponential backoff.
func (c *Reconciler) createTaskRun(ctx context.Context, taskRunName string, params v1.Params, rpt *resources.ResolvedPipelineTask, pr *v1.PipelineRun, facts *resources.PipelineRunFacts) (*v1.TaskRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "createTaskRun")
	defer span.End()
	logger := logging.FromContext(ctx)
	// Resolve $(context...) references on the pipeline task before copying anything
	// from it into the TaskRun spec.
	rpt.PipelineTask = resources.ApplyPipelineTaskContexts(rpt.PipelineTask, pr.Status, facts)
	taskRunSpec := pr.GetTaskRunSpec(rpt.PipelineTask.Name)
	// Matrix-combination params (if any) come first, then the pipeline task's own params.
	params = append(params, rpt.PipelineTask.Params...)
	tr := &v1.TaskRun{
		ObjectMeta: metav1.ObjectMeta{
			Name:            taskRunName,
			Namespace:       pr.Namespace,
			OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pr)},
			Labels:          combineTaskRunAndTaskSpecLabels(pr, rpt.PipelineTask),
			Annotations:     combineTaskRunAndTaskSpecAnnotations(pr, rpt.PipelineTask),
		},
		Spec: v1.TaskRunSpec{
			Retries:            rpt.PipelineTask.Retries,
			Params:             params,
			ServiceAccountName: taskRunSpec.ServiceAccountName,
			PodTemplate:        taskRunSpec.PodTemplate,
			StepSpecs:          taskRunSpec.StepSpecs,
			SidecarSpecs:       taskRunSpec.SidecarSpecs,
			ComputeResources:   taskRunSpec.ComputeResources,
		},
	}
	// Add current spanContext as annotations to TaskRun
	// so that tracing can be continued under the same traceId
	if spanContext, err := getMarshalledSpanFromContext(ctx); err == nil {
		tr.Annotations[TaskRunSpanContextAnnotation] = spanContext
	}
	// Mark the TaskRun so downstream reconciliation knows failures should not fail the pipeline.
	if rpt.PipelineTask.OnError == v1.PipelineTaskContinue {
		tr.Annotations[v1.PipelineTaskOnErrorAnnotation] = string(v1.PipelineTaskContinue)
	}
	if rpt.PipelineTask.Timeout != nil {
		tr.Spec.Timeout = rpt.PipelineTask.Timeout
	}
	// taskRunSpec timeout overrides pipeline task timeout
	if taskRunSpec.Timeout != nil {
		tr.Spec.Timeout = taskRunSpec.Timeout
	}
	if rpt.ResolvedTask.TaskName != "" {
		// We pass the entire, original task ref because it may contain additional references like a Bundle url.
		tr.Spec.TaskRef = rpt.PipelineTask.TaskRef
	} else if rpt.ResolvedTask.TaskSpec != nil {
		tr.Spec.TaskSpec = rpt.ResolvedTask.TaskSpec
	}
	var pipelinePVCWorkspaceName string
	var err error
	tr.Spec.Workspaces, pipelinePVCWorkspaceName, err = c.getTaskrunWorkspaces(ctx, pr, rpt)
	if err != nil {
		return nil, err
	}
	aaBehavior, err := affinityassistant.GetAffinityAssistantBehavior(ctx)
	if err != nil {
		return nil, err
	}
	// Only annotate when an affinity assistant actually applies to this TaskRun's workspaces.
	if aaAnnotationVal := getAffinityAssistantAnnotationVal(aaBehavior, pipelinePVCWorkspaceName, pr.Name); aaAnnotationVal != "" {
		tr.Annotations[workspace.AnnotationAffinityAssistantName] = aaAnnotationVal
	}
	logger.Infof("Creating a new TaskRun object %s for pipeline task %s", taskRunName, rpt.PipelineTask.Name)
	cfg := config.FromContextOrDefaults(ctx)
	// Fast path: no retry wrapper when the backoff feature flag is off.
	if !cfg.FeatureFlags.EnableWaitExponentialBackoff {
		return c.PipelineClientSet.TektonV1().TaskRuns(pr.Namespace).Create(ctx, tr, metav1.CreateOptions{})
	}
	backoff := wait.Backoff{
		Duration: cfg.WaitExponentialBackoff.Duration, // Initial delay before retry
		Factor:   cfg.WaitExponentialBackoff.Factor,   // Multiplier for exponential growth
		Steps:    cfg.WaitExponentialBackoff.Steps,    // Maximum number of retry attempts
		Cap:      cfg.WaitExponentialBackoff.Cap,      // Maximum time spent before giving up
	}
	var result *v1.TaskRun
	// Retry creation only on webhook timeouts; any other error aborts immediately.
	err = wait.ExponentialBackoff(backoff, func() (bool, error) {
		result = nil
		result, err = c.PipelineClientSet.TektonV1().TaskRuns(pr.Namespace).Create(ctx, tr, metav1.CreateOptions{})
		if err != nil {
			if ctrl.IsWebhookTimeout(err) {
				return false, nil // retry
			}
			return false, err // do not retry
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return result, nil
}
// handleRunCreationError marks the PipelineRun as failed and returns a permanent
// error when the child (PinP) PipelineRun/TaskRun/CustomRun creation error is known
// to be non-retryable; any other error is returned unchanged so the reconcile retries.
func (c *Reconciler) handleRunCreationError(pr *v1.PipelineRun, err error) error {
	switch {
	case controller.IsPermanentError(err):
		pr.Status.MarkFailed(v1.PipelineRunReasonCreateRunFailed.String(), err.Error())
		return err
	case apierrors.IsInvalid(err) || apierrors.IsBadRequest(err):
		// Not an exhaustive list of permanent errors; other permanent creation
		// failures can be added to this case.
		pr.Status.MarkFailed(v1.PipelineRunReasonCreateRunFailed.String(), err.Error())
		return controller.NewPermanentError(err)
	default:
		return err
	}
}
// createCustomRuns creates one CustomRun per generated custom-run name on the
// resolved pipeline task, fanning out matrix combinations when the task is matrixed.
// The first creation failure is routed through handleRunCreationError and aborts.
func (c *Reconciler) createCustomRuns(ctx context.Context, rpt *resources.ResolvedPipelineTask, pr *v1.PipelineRun, facts *resources.PipelineRunFacts) ([]*v1beta1.CustomRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "createCustomRuns")
	defer span.End()
	var combos []v1.Params
	if rpt.PipelineTask.IsMatrixed() {
		combos = rpt.PipelineTask.Matrix.FanOut()
	}
	var customRuns []*v1beta1.CustomRun
	for i, name := range rpt.CustomRunNames {
		var ps v1.Params
		if i < len(combos) {
			ps = combos[i]
		}
		run, err := c.createCustomRun(ctx, name, ps, rpt, pr, facts)
		if err != nil {
			return nil, c.handleRunCreationError(pr, err)
		}
		customRuns = append(customRuns, run)
	}
	return customRuns, nil
}
// createCustomRun builds and submits a single v1beta1 CustomRun for the given
// resolved pipeline task. TaskRef, Params and Workspaces are converted from v1 to
// v1beta1 because CustomRun is still served at v1beta1; an embedded TaskSpec is
// marshalled into the CustomRun's raw CustomSpec. When the wait-exponential-backoff
// feature flag is enabled, creation is retried on webhook timeouts.
func (c *Reconciler) createCustomRun(ctx context.Context, runName string, params v1.Params, rpt *resources.ResolvedPipelineTask, pr *v1.PipelineRun, facts *resources.PipelineRunFacts) (*v1beta1.CustomRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "createCustomRun")
	defer span.End()
	logger := logging.FromContext(ctx)
	// Resolve $(context...) references on the pipeline task before copying from it.
	rpt.PipelineTask = resources.ApplyPipelineTaskContexts(rpt.PipelineTask, pr.Status, facts)
	taskRunSpec := pr.GetTaskRunSpec(rpt.PipelineTask.Name)
	// Matrix-combination params (if any) come first, then the pipeline task's own params.
	params = append(params, rpt.PipelineTask.Params...)
	taskTimeout := rpt.PipelineTask.Timeout
	// taskRunSpec timeout overrides pipeline task timeout
	if taskRunSpec.Timeout != nil {
		taskTimeout = taskRunSpec.Timeout
	}
	var pipelinePVCWorkspaceName string
	var err error
	var workspaces []v1.WorkspaceBinding
	workspaces, pipelinePVCWorkspaceName, err = c.getTaskrunWorkspaces(ctx, pr, rpt)
	if err != nil {
		return nil, err
	}
	objectMeta := metav1.ObjectMeta{
		Name:            runName,
		Namespace:       pr.Namespace,
		OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pr)},
		Labels:          createChildResourceLabels(pr, rpt.PipelineTask.Name, true),
		Annotations:     createChildResourceAnnotations(pr),
	}
	// TaskRef, Params and Workspaces are converted to v1beta1 since CustomRuns
	// is still in v1beta1 apiVersion
	var customRef *v1beta1.TaskRef
	if rpt.PipelineTask.TaskRef != nil {
		customRef = &v1beta1.TaskRef{}
		customRef.ConvertFrom(ctx, *rpt.PipelineTask.TaskRef)
	}
	customRunParams := v1beta1.Params{}
	for _, p := range params {
		v1beta1Param := v1beta1.Param{}
		v1beta1Param.ConvertFrom(ctx, p)
		customRunParams = append(customRunParams, v1beta1Param)
	}
	customRunWorkspaces := []v1beta1.WorkspaceBinding{}
	for _, w := range workspaces {
		v1beta1WorkspaceBinding := v1beta1.WorkspaceBinding{}
		v1beta1WorkspaceBinding.ConvertFrom(ctx, w)
		customRunWorkspaces = append(customRunWorkspaces, v1beta1WorkspaceBinding)
	}
	r := &v1beta1.CustomRun{
		ObjectMeta: objectMeta,
		Spec: v1beta1.CustomRunSpec{
			Retries:            rpt.PipelineTask.Retries,
			CustomRef:          customRef,
			Params:             customRunParams,
			ServiceAccountName: taskRunSpec.ServiceAccountName,
			Timeout:            taskTimeout,
			Workspaces:         customRunWorkspaces,
		},
	}
	// An embedded spec is carried as raw JSON plus its own TypeMeta, since the
	// custom task's spec schema is opaque to this controller.
	if rpt.PipelineTask.TaskSpec != nil {
		j, err := json.Marshal(rpt.PipelineTask.TaskSpec.Spec)
		if err != nil {
			return nil, err
		}
		r.Spec.CustomSpec = &v1beta1.EmbeddedCustomRunSpec{
			TypeMeta: runtime.TypeMeta{
				APIVersion: rpt.PipelineTask.TaskSpec.APIVersion,
				Kind:       rpt.PipelineTask.TaskSpec.Kind,
			},
			Metadata: v1beta1.PipelineTaskMetadata(rpt.PipelineTask.TaskSpec.Metadata),
			Spec: runtime.RawExtension{
				Raw: j,
			},
		}
	}
	// Set the affinity assistant annotation in case the custom task creates TaskRuns or Pods
	// that can take advantage of it.
	aaBehavior, err := affinityassistant.GetAffinityAssistantBehavior(ctx)
	if err != nil {
		return nil, err
	}
	if aaAnnotationVal := getAffinityAssistantAnnotationVal(aaBehavior, pipelinePVCWorkspaceName, pr.Name); aaAnnotationVal != "" {
		r.Annotations[workspace.AnnotationAffinityAssistantName] = aaAnnotationVal
	}
	logger.Infof("Creating a new CustomRun object %s", runName)
	cfg := config.FromContextOrDefaults(ctx)
	// Fast path: no retry wrapper when the backoff feature flag is off.
	if !cfg.FeatureFlags.EnableWaitExponentialBackoff {
		return c.PipelineClientSet.TektonV1beta1().CustomRuns(pr.Namespace).Create(ctx, r, metav1.CreateOptions{})
	}
	backoff := wait.Backoff{
		Duration: cfg.WaitExponentialBackoff.Duration, // Initial delay before retry
		Factor:   cfg.WaitExponentialBackoff.Factor,   // Multiplier for exponential growth
		Steps:    cfg.WaitExponentialBackoff.Steps,    // Maximum number of retry attempts
		Cap:      cfg.WaitExponentialBackoff.Cap,      // Maximum time spent before giving up
	}
	var result *v1beta1.CustomRun
	// Retry creation only on webhook timeouts; any other error aborts immediately.
	err = wait.ExponentialBackoff(backoff, func() (bool, error) {
		result = nil
		result, err = c.PipelineClientSet.TektonV1beta1().CustomRuns(pr.Namespace).Create(ctx, r, metav1.CreateOptions{})
		if err != nil {
			if ctrl.IsWebhookTimeout(err) {
				return false, nil // retry
			}
			return false, err // do not retry
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return result, nil
}
// propagateWorkspaces identifies the workspaces that the pipeline task uses.
// Any workspace referenced by the embedded TaskSpec but not yet declared on the
// pipeline task is added as a new binding. It returns the updated resolved
// pipeline task (also returned unchanged alongside any lookup error).
func propagateWorkspaces(rpt *resources.ResolvedPipelineTask) (*resources.ResolvedPipelineTask, error) {
	spec := rpt.PipelineTask.TaskSpec.TaskSpec
	used, err := workspace.FindWorkspacesUsedByTask(spec)
	if err != nil {
		return rpt, err
	}
	declared := sets.NewString()
	for _, binding := range rpt.PipelineTask.Workspaces {
		declared.Insert(binding.Name)
	}
	for name := range used {
		if declared.Has(name) {
			continue
		}
		rpt.PipelineTask.Workspaces = append(rpt.PipelineTask.Workspaces, v1.WorkspacePipelineTaskBinding{Name: name})
	}
	return rpt, nil
}
// getTaskrunWorkspaces computes the workspace bindings for the TaskRuns (or CustomRuns)
// of a pipeline task. It returns the bindings, plus the name of the last pipeline
// workspace seen that is PVC-backed (persistentVolumeClaim or volumeClaimTemplate),
// if any.
//
// Workspaces required by an embedded TaskSpec are first propagated onto the pipeline
// task. A workspace with no PipelineRun binding is a permanent error unless the
// resolved Task declares it optional. PipelineRun context variables are substituted
// into every binding subPath before returning.
func (c *Reconciler) getTaskrunWorkspaces(ctx context.Context, pr *v1.PipelineRun, rpt *resources.ResolvedPipelineTask) ([]v1.WorkspaceBinding, string, error) {
	var err error
	var workspaces []v1.WorkspaceBinding
	var pipelinePVCWorkspaceName string
	pipelineRunWorkspaces := make(map[string]v1.WorkspaceBinding, len(pr.Spec.Workspaces))
	for _, binding := range pr.Spec.Workspaces {
		pipelineRunWorkspaces[binding.Name] = binding
	}
	// Propagate required workspaces from pipelineRun to the pipelineTasks
	if rpt.PipelineTask.TaskSpec != nil {
		rpt, err = propagateWorkspaces(rpt)
		if err != nil {
			// This error cannot be recovered without modifying the TaskSpec
			return nil, "", controller.NewPermanentError(err)
		}
	}
	// The affinity assistant behavior depends only on the context's config, so resolve
	// it once here instead of once per bound workspace inside the loop below.
	aaBehavior, err := affinityassistant.GetAffinityAssistantBehavior(ctx)
	if err != nil {
		return nil, "", err
	}
	for _, ws := range rpt.PipelineTask.Workspaces {
		taskWorkspaceName, pipelineTaskSubPath, pipelineWorkspaceName := ws.Name, ws.SubPath, ws.Workspace
		// When the pipeline task doesn't name a pipeline workspace explicitly,
		// the task workspace name doubles as the pipeline workspace name.
		pipelineWorkspace := pipelineWorkspaceName
		if pipelineWorkspaceName == "" {
			pipelineWorkspace = taskWorkspaceName
		}
		if b, hasBinding := pipelineRunWorkspaces[pipelineWorkspace]; hasBinding {
			if b.PersistentVolumeClaim != nil || b.VolumeClaimTemplate != nil {
				pipelinePVCWorkspaceName = pipelineWorkspace
			}
			// Named taskWorkspaceBinding (previously "workspace") to avoid shadowing
			// the imported workspace package in this scope.
			taskWorkspaceBinding := c.taskWorkspaceByWorkspaceVolumeSource(ctx, pipelinePVCWorkspaceName, pr.Name, b, taskWorkspaceName, pipelineTaskSubPath, *kmeta.NewControllerRef(pr), aaBehavior)
			workspaces = append(workspaces, taskWorkspaceBinding)
		} else {
			workspaceIsOptional := false
			if rpt.ResolvedTask != nil && rpt.ResolvedTask.TaskSpec != nil {
				for _, taskWorkspaceDeclaration := range rpt.ResolvedTask.TaskSpec.Workspaces {
					if taskWorkspaceDeclaration.Name == taskWorkspaceName && taskWorkspaceDeclaration.Optional {
						workspaceIsOptional = true
						break
					}
				}
			}
			if !workspaceIsOptional {
				err = fmt.Errorf("expected workspace %q to be provided by pipelinerun for pipeline task %q", pipelineWorkspace, rpt.PipelineTask.Name)
				// This error cannot be recovered without modifying the PipelineRun
				return nil, "", controller.NewPermanentError(err)
			}
		}
	}
	// replace pipelineRun context variables in workspace subPath in the workspace binding
	var p string
	if pr.Spec.PipelineRef != nil {
		p = pr.Spec.PipelineRef.Name
	}
	for j := range workspaces {
		workspaces[j].SubPath = substitution.ApplyReplacements(workspaces[j].SubPath, resources.GetContextReplacements(p, pr))
	}
	return workspaces, pipelinePVCWorkspaceName, nil
}
// taskWorkspaceByWorkspaceVolumeSource returns the WorkspaceBinding to be bound to each
// TaskRun in the Pipeline Task. For a volumeClaimTemplate binding it references a PVC
// derived from the PipelineRun (named per the affinity assistant behavior, with the
// PipelineRun as owner); otherwise it reuses the PipelineRun's binding re-targeted at
// the task workspace name, with the subpaths joined.
func (c *Reconciler) taskWorkspaceByWorkspaceVolumeSource(ctx context.Context, pipelineWorkspaceName string, prName string, wb v1.WorkspaceBinding, taskWorkspaceName string, pipelineTaskSubPath string, owner metav1.OwnerReference, aaBehavior affinityassistant.AffinityAssistantBehavior) v1.WorkspaceBinding {
	if wb.VolumeClaimTemplate == nil {
		// Not a volumeClaimTemplate: copy the binding as-is and re-target it.
		out := *wb.DeepCopy()
		out.Name = taskWorkspaceName
		out.SubPath = combinedSubPath(wb.SubPath, pipelineTaskSubPath)
		return out
	}
	out := v1.WorkspaceBinding{
		Name:                  taskWorkspaceName,
		SubPath:               combinedSubPath(wb.SubPath, pipelineTaskSubPath),
		PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{},
	}
	switch aaBehavior {
	case affinityassistant.AffinityAssistantPerWorkspace, affinityassistant.AffinityAssistantDisabled:
		out.PersistentVolumeClaim.ClaimName = volumeclaim.GeneratePVCNameFromWorkspaceBinding(wb.VolumeClaimTemplate.Name, wb, owner)
	case affinityassistant.AffinityAssistantPerPipelineRun, affinityassistant.AffinityAssistantPerPipelineRunWithIsolation:
		out.PersistentVolumeClaim.ClaimName = getPersistentVolumeClaimNameWithAffinityAssistant("", prName, wb, owner)
	}
	return out
}
// combinedSubPath returns the combined value of the optional subPath from the
// workspaceBinding and the optional subPath from the pipelineTask. When both are
// set, they are joined with a slash; when only one is set, it is returned as-is.
func combinedSubPath(workspaceSubPath string, pipelineTaskSubPath string) string {
	switch {
	case workspaceSubPath == "":
		return pipelineTaskSubPath
	case pipelineTaskSubPath == "":
		return workspaceSubPath
	default:
		return filepath.Join(workspaceSubPath, pipelineTaskSubPath)
	}
}
// createChildResourceAnnotations returns the annotations to propagate from a
// PipelineRun to its child (PinP) PipelineRun/TaskRun/CustomRun, with reserved
// annotations filtered out via filterReservedAnnotationRegexp.
func createChildResourceAnnotations(pr *v1.PipelineRun) map[string]string {
	copied := make(map[string]string, len(pr.ObjectMeta.Annotations)+1)
	for key, val := range pr.ObjectMeta.Annotations {
		copied[key] = val
	}
	return kmap.Filter(copied, filterReservedAnnotationRegexp.MatchString)
}
// propagatePipelineNameLabelToPipelineRun ensures the PipelineRun carries the
// pipeline-name label. An existing label is left untouched; otherwise the value is
// derived from the PipelineRef name, the PipelineRun name (for inline specs and
// resolver-based refs), or — for the cluster resolver with kind "pipeline" — the
// resolver's "name" param. Errors when neither a PipelineRef nor a PipelineSpec
// is provided.
func propagatePipelineNameLabelToPipelineRun(pr *v1.PipelineRun) error {
	if pr.ObjectMeta.Labels == nil {
		pr.ObjectMeta.Labels = map[string]string{}
	}
	// A label already set by the user (or a previous reconcile) wins.
	if _, present := pr.ObjectMeta.Labels[pipeline.PipelineLabelKey]; present {
		return nil
	}
	ref := pr.Spec.PipelineRef
	switch {
	case ref != nil && ref.Name != "":
		pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = ref.Name
	case pr.Spec.PipelineSpec != nil:
		pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = pr.Name
	case ref != nil && ref.Resolver != "":
		// Default to the PipelineRun name; when the cluster resolver names a
		// pipeline explicitly, that name is preferred below.
		// https://tekton.dev/docs/pipelines/cluster-resolver/#pipeline-resolution
		pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = pr.Name
		var kind, name string
		for _, param := range ref.Params {
			switch param.Name {
			case "kind":
				kind = param.Value.StringVal
			case "name":
				name = param.Value.StringVal
			}
		}
		if kind == "pipeline" {
			pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = name
		}
	default:
		return fmt.Errorf("pipelineRun %s not providing PipelineRef or PipelineSpec", pr.Name)
	}
	return nil
}
// createChildResourceLabels builds the labels for a child (PinP)
// PipelineRun/TaskRun/CustomRun: optionally the parent PipelineRun's own labels,
// always the parent run name/UID, the pipeline task name (when given), and a
// member-of label identifying whether the task sits in "tasks" or "finally".
func createChildResourceLabels(pr *v1.PipelineRun, pipelineTaskName string, includePipelineRunLabels bool) map[string]string {
	labels := make(map[string]string, len(pr.ObjectMeta.Labels)+1)
	if includePipelineRunLabels {
		for k, v := range pr.ObjectMeta.Labels {
			labels[k] = v
		}
	}
	labels[pipeline.PipelineRunLabelKey] = pr.Name
	labels[pipeline.PipelineRunUIDLabelKey] = string(pr.UID)
	if pipelineTaskName != "" {
		labels[pipeline.PipelineTaskLabelKey] = pipelineTaskName
	}
	if spec := pr.Status.PipelineSpec; spec != nil {
		// check if a task is part of the "tasks" section, add a label to identify it during the runtime
		for _, t := range spec.Tasks {
			if t.Name == pipelineTaskName {
				labels[pipeline.MemberOfLabelKey] = v1.PipelineTasks
				break
			}
		}
		// check if a task is part of the "finally" section, add a label to identify it during the runtime
		for _, t := range spec.Finally {
			if t.Name == pipelineTaskName {
				labels[pipeline.MemberOfLabelKey] = v1.PipelineFinallyTasks
				break
			}
		}
	}
	return labels
}
// combineTaskRunAndTaskSpecLabels merges labels for a child TaskRun with this
// precedence (earlier wins): per-task TaskRunSpec metadata, then the child-resource
// labels derived from the PipelineRun, then the embedded TaskSpec's metadata.
func combineTaskRunAndTaskSpecLabels(pr *v1.PipelineRun, pipelineTask *v1.PipelineTask) map[string]string {
	merged := map[string]string{}
	if trs := pr.GetTaskRunSpec(pipelineTask.Name); trs.Metadata != nil {
		addMetadataByPrecedence(merged, trs.Metadata.Labels)
	}
	addMetadataByPrecedence(merged, createChildResourceLabels(pr, pipelineTask.Name, true))
	if pipelineTask.TaskSpec != nil {
		addMetadataByPrecedence(merged, pipelineTask.TaskSpecMetadata().Labels)
	}
	return merged
}
// combineTaskRunAndTaskSpecAnnotations merges annotations for a child TaskRun with
// this precedence (earlier wins): per-task TaskRunSpec metadata, then the
// child-resource annotations derived from the PipelineRun, then the embedded
// TaskSpec's metadata.
func combineTaskRunAndTaskSpecAnnotations(pr *v1.PipelineRun, pipelineTask *v1.PipelineTask) map[string]string {
	merged := map[string]string{}
	if trs := pr.GetTaskRunSpec(pipelineTask.Name); trs.Metadata != nil {
		addMetadataByPrecedence(merged, trs.Metadata.Annotations)
	}
	addMetadataByPrecedence(merged, createChildResourceAnnotations(pr))
	if pipelineTask.TaskSpec != nil {
		addMetadataByPrecedence(merged, pipelineTask.TaskSpecMetadata().Annotations)
	}
	return merged
}
// addMetadataByPrecedence copies entries from addedMetadata into metadata without
// overwriting: when a key exists in both maps, the value already in metadata wins.
func addMetadataByPrecedence(metadata map[string]string, addedMetadata map[string]string) {
	for key, value := range addedMetadata {
		if _, exists := metadata[key]; exists {
			// keep the current annotation/label: earlier additions take precedence
			continue
		}
		metadata[key] = value
	}
}
// updateLabelsAndAnnotations pushes label/annotation changes made during this
// reconcile back to the API server. It re-reads the stored PipelineRun from the
// lister, returns it untouched when nothing differs, and otherwise merges the
// in-memory labels/annotations into a copy and issues an Update.
func (c *Reconciler) updateLabelsAndAnnotations(ctx context.Context, pr *v1.PipelineRun) (*v1.PipelineRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "updateLabelsAndAnnotations")
	defer span.End()
	stored, err := c.pipelineRunLister.PipelineRuns(pr.Namespace).Get(pr.Name)
	if err != nil {
		return nil, fmt.Errorf("error getting PipelineRun %s when updating labels/annotations: %w", pr.Name, err)
	}
	labelsMatch := reflect.DeepEqual(pr.ObjectMeta.Labels, stored.ObjectMeta.Labels)
	annotationsMatch := reflect.DeepEqual(pr.ObjectMeta.Annotations, stored.ObjectMeta.Annotations)
	if labelsMatch && annotationsMatch {
		return stored, nil
	}
	// Note that this uses Update vs. Patch because the former is significantly easier to test.
	// If we want to switch this to Patch, then we will need to teach the utilities in test/controller.go
	// to deal with Patch (setting resourceVersion, and optimistic concurrency checks).
	updated := stored.DeepCopy()
	// Properly merge labels and annotations, as the labels *might* have changed during the reconciliation
	updated.Labels = kmap.Union(updated.Labels, pr.Labels)
	updated.Annotations = kmap.Union(updated.Annotations, pr.Annotations)
	return c.PipelineClientSet.TektonV1().PipelineRuns(pr.Namespace).Update(ctx, updated, metav1.UpdateOptions{})
}
// storePipelineSpecAndMergeMeta stores the resolved PipelineSpec on the PipelineRun's
// status (first reconcile only) and merges labels/annotations from the resolved
// Pipeline's metadata into the PipelineRun, with PipelineRun values taking precedence.
// It also records provenance (refSource and feature flags) when the
// enable-provenance-in-status flag is set.
func storePipelineSpecAndMergeMeta(ctx context.Context, pr *v1.PipelineRun, ps *v1.PipelineSpec, meta *resolutionutil.ResolvedObjectMeta) error {
	// Only store the PipelineSpec once, if it has never been set before.
	if pr.Status.PipelineSpec == nil {
		pr.Status.PipelineSpec = ps
		if meta == nil {
			// NOTE(review): returning here also skips the provenance propagation
			// below for this reconcile — confirm that is intended when meta is nil.
			return nil
		}
		// Propagate labels from Pipeline to PipelineRun. PipelineRun labels take precedences over Pipeline.
		pr.ObjectMeta.Labels = kmap.Union(meta.Labels, pr.ObjectMeta.Labels)
		if len(meta.Name) > 0 {
			pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = meta.Name
		}
		// Propagate annotations from Pipeline to PipelineRun. PipelineRun annotations take precedences over Pipeline.
		// The kubectl last-applied annotation is excluded from the propagated set.
		pr.ObjectMeta.Annotations = kmap.Union(kmap.ExcludeKeys(meta.Annotations, tknreconciler.KubectlLastAppliedAnnotationKey), pr.ObjectMeta.Annotations)
	}
	// Propagate refSource from remote resolution to PipelineRun Status
	// This lives outside of the status.spec check to avoid the case where only the spec is available in the first reconcile and source comes in next reconcile.
	cfg := config.FromContextOrDefaults(ctx)
	if cfg.FeatureFlags.EnableProvenanceInStatus {
		if pr.Status.Provenance == nil {
			pr.Status.Provenance = &v1.Provenance{}
		}
		// Store FeatureFlags in the Provenance.
		pr.Status.Provenance.FeatureFlags = cfg.FeatureFlags
		// Only set refSource once; a value recorded earlier is never overwritten.
		if meta != nil && meta.RefSource != nil && pr.Status.Provenance.RefSource == nil {
			pr.Status.Provenance.RefSource = meta.RefSource
		}
	}
	return nil
}
// updatePipelineRunStatusFromInformer rebuilds the PipelineRun's child
// references from the informer caches, picking up any child (PinP)
// PipelineRuns, TaskRuns and CustomRuns that carry this PipelineRun's
// child-resource labels.
func (c *Reconciler) updatePipelineRunStatusFromInformer(ctx context.Context, pr *v1.PipelineRun) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "updatePipelineRunStatusFromInformer")
	defer span.End()
	logger := logging.FromContext(ctx)

	// Select children via the parent PipelineRun label only. Labels propagated
	// from the Pipeline/PipelineRun are deliberately excluded: the user may
	// change those during the run, so older children might not carry them.
	selector := k8slabels.SelectorFromSet(createChildResourceLabels(pr, "", false))

	childPipelineRuns, err := c.pipelineRunLister.PipelineRuns(pr.Namespace).List(selector)
	if err != nil {
		logger.Errorf("Could not list PipelineRuns %#v", err)
		return err
	}
	taskRuns, err := c.taskRunLister.TaskRuns(pr.Namespace).List(selector)
	if err != nil {
		logger.Errorf("Could not list TaskRuns %#v", err)
		return err
	}
	customRuns, err := c.customRunLister.CustomRuns(pr.Namespace).List(selector)
	if err != nil {
		logger.Errorf("Could not list CustomRuns %#v", err)
		return err
	}
	return updatePipelineRunStatusFromChildObjects(ctx, logger, pr, childPipelineRuns, taskRuns, customRuns)
}
// updatePipelineRunStatusFromChildObjects merges the given child objects into
// the PipelineRun's child references and then validates the resulting status.
func updatePipelineRunStatusFromChildObjects(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, childPipelineRuns []*v1.PipelineRun, taskRuns []*v1.TaskRun, customRuns []*v1beta1.CustomRun) error {
	updatePipelineRunStatusFromChildRefs(logger, pr, childPipelineRuns, taskRuns, customRuns)
	return validateChildObjectsInPipelineRunStatus(ctx, pr.Status)
}
// validateChildObjectsInPipelineRunStatus checks that every child reference in
// the status has a known kind (TaskRun, CustomRun, or PipelineRun) and returns
// one joined error covering every offending reference, or nil.
func validateChildObjectsInPipelineRunStatus(ctx context.Context, prs v1.PipelineRunStatus) error {
	var errs []error
	for _, ref := range prs.ChildReferences {
		switch ref.Kind {
		case taskRun, customRun, pipelineRun:
			// Known kind: nothing to do.
		default:
			errs = append(errs, fmt.Errorf("child with name %s has unknown kind %s", ref.Name, ref.Kind))
		}
	}
	// errors.Join returns nil for an empty slice, matching the old behavior.
	return errors.Join(errs...)
}
// filterChildPipelineRunsForParentPipelineRunStatus returns child (PinP) PipelineRuns owned by the parent PipelineRun.
func filterChildPipelineRunsForParentPipelineRunStatus(logger *zap.SugaredLogger, pr *v1.PipelineRun, childPipelineRuns []*v1.PipelineRun) []*v1.PipelineRun {
	var owned []*v1.PipelineRun
	for _, child := range childPipelineRuns {
		// Only process child (PinP) PipelineRuns that are owned by this parent PipelineRun.
		// This skips PipelineRuns that are indirectly created by the PipelineRun (e.g. by custom tasks).
		if len(child.OwnerReferences) == 0 || child.OwnerReferences[0].UID != pr.ObjectMeta.UID {
			// "(PinP)" matches the abbreviation used by every other log/comment in
			// this file; the previous "(PIP)" spelling was a typo.
			logger.Debugf("Found a child (PinP) PipelineRun %s that is not owned by this parent PipelineRun", child.Name)
			continue
		}
		owned = append(owned, child)
	}
	return owned
}
// filterTaskRunsForPipelineRunStatus returns only the TaskRuns from trs that
// are directly owned by the given PipelineRun; TaskRuns created indirectly
// (e.g. by custom tasks) are skipped.
func filterTaskRunsForPipelineRunStatus(logger *zap.SugaredLogger, pr *v1.PipelineRun, trs []*v1.TaskRun) []*v1.TaskRun {
	var owned []*v1.TaskRun
	for _, candidate := range trs {
		// The first owner reference must point back at this PipelineRun.
		if len(candidate.OwnerReferences) < 1 || candidate.OwnerReferences[0].UID != pr.ObjectMeta.UID {
			logger.Debugf("Found a TaskRun %s that is not owned by this PipelineRun", candidate.Name)
			continue
		}
		owned = append(owned, candidate)
	}
	return owned
}
// filterCustomRunsForPipelineRunStatus filters the given slice of customRuns, returning information only for those owned by the given PipelineRun.
// It returns four index-aligned slices: the CustomRun names, their
// pipeline-task label values, their group/version/kinds, and pointers to
// their statuses.
func filterCustomRunsForPipelineRunStatus(logger *zap.SugaredLogger, pr *v1.PipelineRun, customRuns []*v1beta1.CustomRun) ([]string, []string, []schema.GroupVersionKind, []*v1beta1.CustomRunStatus) {
	var names []string
	var taskLabels []string
	var gvks []schema.GroupVersionKind
	var statuses []*v1beta1.CustomRunStatus
	// Loop over all the customRuns associated to Tasks
	for _, cr := range customRuns {
		// Hoist the metadata accessor instead of re-invoking the interface
		// method for every field read below.
		meta := cr.GetObjectMeta()
		// Only process customRuns that are owned by this PipelineRun.
		// This skips customRuns that are indirectly created by the PipelineRun (e.g. by custom tasks).
		if owners := meta.GetOwnerReferences(); len(owners) < 1 || owners[0].UID != pr.ObjectMeta.UID {
			logger.Debugf("Found a %s %s that is not owned by this PipelineRun", cr.GetObjectKind().GroupVersionKind().Kind, meta.GetName())
			continue
		}
		names = append(names, meta.GetName())
		taskLabels = append(taskLabels, meta.GetLabels()[pipeline.PipelineTaskLabelKey])
		statuses = append(statuses, &cr.Status)
		// We can't just get the gvk from the customRun's TypeMeta because that isn't populated for resources created through the fake client.
		gvks = append(gvks, v1beta1.SchemeGroupVersion.WithKind(customRun))
	}
	return names, taskLabels, gvks, statuses
}
// updatePipelineRunStatusFromChildRefs reconciles pr.Status.ChildReferences
// against the child (PinP) PipelineRuns, TaskRuns and CustomRuns observed in
// the cluster: any owned child that is missing from the status is added back.
// Existing references are never removed or overwritten.
func updatePipelineRunStatusFromChildRefs(logger *zap.SugaredLogger, pr *v1.PipelineRun, childPipelineRuns []*v1.PipelineRun, trs []*v1.TaskRun, customRuns []*v1beta1.CustomRun) {
	// If no child (PinP) PipelineRun, TaskRun or CustomRun was found, nothing to be done. We never remove child references from the status.
	if len(childPipelineRuns) == 0 && len(trs) == 0 && len(customRuns) == 0 {
		return
	}
	// Index the child references already present in the status by child name,
	// so lookups below are O(1).
	childRefByName := make(map[string]*v1.ChildStatusReference)
	for i := range pr.Status.ChildReferences {
		childRefByName[pr.Status.ChildReferences[i].Name] = &pr.Status.ChildReferences[i]
	}
	filteredChildPipelineRuns := filterChildPipelineRunsForParentPipelineRunStatus(logger, pr, childPipelineRuns)
	// Loop over all the child (PinP) PipelineRuns associated to the parent PipelineRun
	for _, fcpr := range filteredChildPipelineRuns {
		labels := fcpr.GetLabels()
		pipelineTaskName := labels[pipeline.PipelineTaskLabelKey]
		// this child pipeline run is already in the status
		if _, ok := childRefByName[fcpr.Name]; ok {
			continue
		}
		logger.Infof("Found a child (PinP) PipelineRun %s that was missing from the parent PipelineRun status", fcpr.Name)
		// Since this was recovered now, add it to the map, or it might be overwritten
		childRefByName[fcpr.Name] = &v1.ChildStatusReference{
			TypeMeta: runtime.TypeMeta{
				APIVersion: v1.SchemeGroupVersion.String(),
				Kind:       pipelineRun,
			},
			Name:             fcpr.Name,
			PipelineTaskName: pipelineTaskName,
		}
	}
	taskRuns := filterTaskRunsForPipelineRunStatus(logger, pr, trs)
	// Loop over all the TaskRuns associated to Tasks
	for _, tr := range taskRuns {
		lbls := tr.GetLabels()
		pipelineTaskName := lbls[pipeline.PipelineTaskLabelKey]
		if _, ok := childRefByName[tr.Name]; !ok {
			// This tr was missing from the status.
			// Add it without conditions, which are handled in the next loop
			logger.Infof("Found a TaskRun %s that was missing from the PipelineRun status", tr.Name)
			// Since this was recovered now, add it to the map, or it might be overwritten
			childRefByName[tr.Name] = &v1.ChildStatusReference{
				TypeMeta: runtime.TypeMeta{
					APIVersion: v1.SchemeGroupVersion.String(),
					Kind:       taskRun,
				},
				Name:             tr.Name,
				PipelineTaskName: pipelineTaskName,
			}
		}
	}
	// Get the names, their task label values, and their group/version/kind info for all CustomRuns or Runs associated with the PipelineRun
	names, taskLabels, gvks, _ := filterCustomRunsForPipelineRunStatus(logger, pr, customRuns)
	// Loop over that data and populate the child references
	for idx := range names {
		name := names[idx]
		taskLabel := taskLabels[idx]
		gvk := gvks[idx]
		if _, ok := childRefByName[name]; !ok {
			// This run was missing from the status.
			// Add it without conditions, which are handled in the next loop
			logger.Infof("Found a %s %s that was missing from the PipelineRun status", gvk.Kind, name)
			// Since this was recovered now, add it to the map, or it might be overwritten
			childRefByName[name] = &v1.ChildStatusReference{
				TypeMeta: runtime.TypeMeta{
					APIVersion: gvk.GroupVersion().String(),
					Kind:       gvk.Kind,
				},
				Name:             name,
				PipelineTaskName: taskLabel,
			}
		}
	}
	// Flatten the map back into the status slice. NOTE: map iteration order is
	// random, so the resulting slice order is nondeterministic.
	var newChildRefs []v1.ChildStatusReference
	for k := range childRefByName {
		newChildRefs = append(newChildRefs, *childRefByName[k])
	}
	pr.Status.ChildReferences = newChildRefs
}
// conditionFromVerificationResult maps a trusted-resources VerificationResult
// onto a ConditionTrustedResourcesVerified condition. For VerificationError a
// non-nil error describing the failed verification is also returned; for
// VerificationSkip both return values are nil.
func conditionFromVerificationResult(verificationResult *trustedresources.VerificationResult, pr *v1.PipelineRun, resourceName string) (*apis.Condition, error) {
	switch verificationResult.VerificationResultType {
	case trustedresources.VerificationError:
		// Verification failed outright: surface the failure both as the
		// returned error and in the condition message.
		err := fmt.Errorf("pipelineRun %s/%s referred resource %s failed signature verification: %w", pr.Namespace, pr.Name, resourceName, verificationResult.Err)
		return &apis.Condition{
			Type:    trustedresources.ConditionTrustedResourcesVerified,
			Status:  corev1.ConditionFalse,
			Message: err.Error(),
		}, err
	case trustedresources.VerificationWarn:
		// Warning: the condition is false, but no error is propagated.
		return &apis.Condition{
			Type:    trustedresources.ConditionTrustedResourcesVerified,
			Status:  corev1.ConditionFalse,
			Message: verificationResult.Err.Error(),
		}, nil
	case trustedresources.VerificationPass:
		return &apis.Condition{
			Type:   trustedresources.ConditionTrustedResourcesVerified,
			Status: corev1.ConditionTrue,
		}, nil
	}
	// trustedresources.VerificationSkip (or any unrecognized value): no condition.
	return nil, nil
}
// validatePipelineSpecAfterApplyParameters re-validates fields that parameter
// substitution may have rewritten (for example tasks[].OnError), since the
// pre-substitution validation can no longer vouch for them.
func validatePipelineSpecAfterApplyParameters(ctx context.Context, pipelineSpec *v1.PipelineSpec) (errs *apis.FieldError) {
	if pipelineSpec == nil {
		return errs.Also(apis.ErrMissingField("PipelineSpec"))
	}
	// Regular tasks first, then finally tasks, mirroring their execution order.
	for _, t := range pipelineSpec.Tasks {
		errs = errs.Also(t.ValidateOnError(ctx))
	}
	for _, t := range pipelineSpec.Finally {
		errs = errs.Also(t.ValidateOnError(ctx))
	}
	return errs
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinespec
import (
"context"
"errors"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
resolutionutil "github.com/tektoncd/pipeline/pkg/internal/resolution"
"github.com/tektoncd/pipeline/pkg/trustedresources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetPipeline is a function used to retrieve Pipelines.
// VerificationResult is the result from trusted resources if the feature is enabled.
// The string argument is the name of the Pipeline to fetch; GetPipelineData
// passes "" for remote-resolution lookups, so implementations backed by a
// resolver may ignore it.
type GetPipeline func(context.Context, string) (*v1.Pipeline, *v1.RefSource, *trustedresources.VerificationResult, error)
// GetPipelineData will retrieve the Pipeline metadata and Spec associated with the
// provided PipelineRun. This can come from a reference Pipeline or from the PipelineRun's
// metadata and embedded PipelineSpec.
func GetPipelineData(ctx context.Context, pipelineRun *v1.PipelineRun, getPipeline GetPipeline) (*resolutionutil.ResolvedObjectMeta, *v1.PipelineSpec, error) {
	var (
		pipelineMeta       = metav1.ObjectMeta{}
		pipelineSpec       = v1.PipelineSpec{}
		refSource          *v1.RefSource
		verificationResult *trustedresources.VerificationResult
	)
	// Case order matters: a by-name reference is preferred over an embedded
	// spec, which is preferred over a resolver-based reference.
	switch {
	case pipelineRun.Spec.PipelineRef != nil && pipelineRun.Spec.PipelineRef.Name != "":
		// A Pipeline referenced by name: fetch it.
		fetched, source, vr, err := getPipeline(ctx, pipelineRun.Spec.PipelineRef.Name)
		if err != nil {
			return nil, nil, fmt.Errorf("error when getting Pipeline for PipelineRun %s: %w", pipelineRun.Name, err)
		}
		pipelineMeta = fetched.PipelineMetadata()
		pipelineSpec = fetched.PipelineSpec()
		refSource = source
		verificationResult = vr
	case pipelineRun.Spec.PipelineSpec != nil:
		// Embedded spec: the PipelineRun itself supplies metadata and spec.
		pipelineMeta = pipelineRun.ObjectMeta
		pipelineSpec = *pipelineRun.Spec.PipelineSpec
		// TODO: if we want to set RefSource for embedded pipeline, set it here.
		// https://github.com/tektoncd/pipeline/issues/5522
	case pipelineRun.Spec.PipelineRef != nil && pipelineRun.Spec.PipelineRef.Resolver != "":
		// Remote resolution: the resolver gets the empty name.
		resolved, source, vr, err := getPipeline(ctx, "")
		switch {
		case err != nil:
			return nil, nil, err
		case resolved == nil:
			return nil, nil, errors.New("resolution of remote resource completed successfully but no pipeline was returned")
		default:
			pipelineMeta = resolved.PipelineMetadata()
			pipelineSpec = resolved.PipelineSpec()
		}
		refSource = source
		verificationResult = vr
	default:
		return nil, nil, fmt.Errorf("pipelineRun %s not providing PipelineRef or PipelineSpec", pipelineRun.Name)
	}
	pipelineSpec.SetDefaults(ctx)
	return &resolutionutil.ResolvedObjectMeta{
		ObjectMeta:         &pipelineMeta,
		RefSource:          refSource,
		VerificationResult: verificationResult,
	}, &pipelineSpec, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
	"context"
	"encoding/json"
	"fmt"
	"maps"
	"strconv"
	"strings"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
	"github.com/tektoncd/pipeline/pkg/substitution"
	"github.com/tektoncd/pipeline/pkg/workspace"
)
const (
	// resultsParseNumber is the number of parts a plain result reference splits
	// into on ".", e.g. tasks.<taskName>.results.<objectResultName>
	resultsParseNumber = 4
	// objectElementResultsParseNumber is the number of parts an object-attribute
	// result reference splits into on ".",
	// e.g. tasks.<taskName>.results.<objectResultName>.<individualAttribute>
	objectElementResultsParseNumber = 5
	// objectIndividualVariablePattern is the reference pattern for object individual keys params.<object_param_name>.<key_name>
	objectIndividualVariablePattern = "params.%s.%s"
)
// paramPatterns are the three accepted spellings of a parameter reference —
// dotted, bracket-with-double-quotes, and bracket-with-single-quotes. Each is
// a fmt pattern that is filled with the parameter name.
var paramPatterns = []string{
	"params.%s",
	"params[%q]",
	"params['%s']",
}
// ApplyParameters applies the params from a PipelineRun.Params to a PipelineSpec.
func ApplyParameters(ctx context.Context, p *v1.PipelineSpec, pr *v1.PipelineRun) *v1.PipelineSpec {
	// This assumes that the PipelineRun inputs have been validated against what the Pipeline requests.
	// Three replacement tables are built: plain strings, whole arrays, and
	// whole objects (the latter two are processed further downstream).
	stringReplacements := map[string]string{}
	arrayReplacements := map[string][]string{}
	objectReplacements := map[string]map[string]string{}
	// Seed the tables with declared defaults. The loop variable is named
	// `param` (not `p`) to avoid shadowing the PipelineSpec parameter.
	for _, param := range p.Params {
		if param.Default == nil {
			continue
		}
		switch param.Default.Type {
		case v1.ParamTypeArray:
			for _, pattern := range paramPatterns {
				// Individual array elements are also addressable as strings.
				for i := range len(param.Default.ArrayVal) {
					stringReplacements[fmt.Sprintf(pattern+"[%d]", param.Name, i)] = param.Default.ArrayVal[i]
				}
				arrayReplacements[fmt.Sprintf(pattern, param.Name)] = param.Default.ArrayVal
			}
		case v1.ParamTypeObject:
			for _, pattern := range paramPatterns {
				objectReplacements[fmt.Sprintf(pattern, param.Name)] = param.Default.ObjectVal
			}
			// Individual object keys are also addressable as strings.
			for k, v := range param.Default.ObjectVal {
				stringReplacements[fmt.Sprintf(objectIndividualVariablePattern, param.Name, k)] = v
			}
		case v1.ParamTypeString:
			fallthrough
		default:
			for _, pattern := range paramPatterns {
				stringReplacements[fmt.Sprintf(pattern, param.Name)] = param.Default.StringVal
			}
		}
	}
	// Values supplied on the PipelineRun override the defaults seeded above.
	prStrings, prArrays, prObjects := paramsFromPipelineRun(ctx, pr)
	for k, v := range prStrings {
		stringReplacements[k] = v
	}
	for k, v := range prArrays {
		arrayReplacements[k] = v
	}
	for k, v := range prObjects {
		objectReplacements[k] = v
	}
	return ApplyReplacements(p, stringReplacements, arrayReplacements, objectReplacements)
}
// paramsFromPipelineRun builds the string, array and object replacement tables
// for the parameter values supplied on the PipelineRun itself.
// NOTE(review): ctx is currently unused; kept for signature stability.
func paramsFromPipelineRun(ctx context.Context, pr *v1.PipelineRun) (map[string]string, map[string][]string, map[string]map[string]string) {
	stringReplacements := map[string]string{}
	arrayReplacements := map[string][]string{}
	objectReplacements := map[string]map[string]string{}
	for _, param := range pr.Spec.Params {
		switch param.Value.Type {
		case v1.ParamTypeArray:
			for _, pattern := range paramPatterns {
				// Each array element is also addressable individually.
				for i := range len(param.Value.ArrayVal) {
					stringReplacements[fmt.Sprintf(pattern+"[%d]", param.Name, i)] = param.Value.ArrayVal[i]
				}
				arrayReplacements[fmt.Sprintf(pattern, param.Name)] = param.Value.ArrayVal
			}
		case v1.ParamTypeObject:
			for _, pattern := range paramPatterns {
				objectReplacements[fmt.Sprintf(pattern, param.Name)] = param.Value.ObjectVal
			}
			// Each object key is also addressable individually.
			for k, v := range param.Value.ObjectVal {
				stringReplacements[fmt.Sprintf(objectIndividualVariablePattern, param.Name, k)] = v
			}
		case v1.ParamTypeString:
			fallthrough
		default:
			for _, pattern := range paramPatterns {
				stringReplacements[fmt.Sprintf(pattern, param.Name)] = param.Value.StringVal
			}
		}
	}
	return stringReplacements, arrayReplacements, objectReplacements
}
// GetContextReplacements returns the pipelineRun context which can be used to replace context variables in the specifications
func GetContextReplacements(pipelineName string, pr *v1.PipelineRun) map[string]string {
	replacements := make(map[string]string, 4)
	replacements["context.pipeline.name"] = pipelineName
	replacements["context.pipelineRun.name"] = pr.Name
	replacements["context.pipelineRun.namespace"] = pr.Namespace
	replacements["context.pipelineRun.uid"] = string(pr.ObjectMeta.UID)
	return replacements
}
// ApplyContexts applies the substitution from $(context.(pipelineRun|pipeline).*) with the specified values.
// Currently supports only name substitution. Uses "" as a default if name is not specified.
func ApplyContexts(spec *v1.PipelineSpec, pipelineName string, pr *v1.PipelineRun) *v1.PipelineSpec {
	// Build the replacement table once instead of once per task: its contents
	// depend only on (pipelineName, pr), not on the loop index, so rebuilding
	// it inside the loops was pure loop-invariant work.
	replacements := GetContextReplacements(pipelineName, pr)
	for i := range spec.Tasks {
		spec.Tasks[i].DisplayName = substitution.ApplyReplacements(spec.Tasks[i].DisplayName, replacements)
	}
	for i := range spec.Finally {
		spec.Finally[i].DisplayName = substitution.ApplyReplacements(spec.Finally[i].DisplayName, replacements)
	}
	return ApplyReplacements(spec, replacements, map[string][]string{}, map[string]map[string]string{})
}
// filterMatrixContextVar returns a list of params which contain any matrix context variables such as
// $(tasks.<pipelineTaskName>.matrix.length) and $(tasks.<pipelineTaskName>.matrix.<resultName>.length)
func filterMatrixContextVar(params v1.Params) v1.Params {
	var filtered v1.Params
	for _, param := range params {
		expressions, ok := param.GetVarSubstitutionExpressions()
		if !ok {
			continue
		}
		for _, expression := range expressions {
			// A matrix context variable has one of the shapes
			//   tasks.<pipelineTaskName>.matrix.length
			//   tasks.<pipelineTaskName>.matrix.<resultName>.length
			// i.e. at least four dot-separated parts with "matrix" third and
			// "length" last.
			parts := strings.Split(expression, ".")
			if len(parts) >= 4 && parts[2] == "matrix" && parts[len(parts)-1] == "length" {
				filtered = append(filtered, param)
			}
		}
	}
	return filtered
}
// ApplyPipelineTaskContexts applies the substitution from $(context.pipelineTask.*) with the specified values.
// Uses "0" as a default if a value is not available as well as matrix context variables
// $(tasks.<pipelineTaskName>.matrix.length) and $(tasks.<pipelineTaskName>.matrix.<resultName>.length)
func ApplyPipelineTaskContexts(pt *v1.PipelineTask, pipelineRunStatus v1.PipelineRunStatus, facts *PipelineRunFacts) *v1.PipelineTask {
	// Work on a deep copy; the caller's PipelineTask is never mutated.
	pt = pt.DeepCopy()
	var pipelineTaskName string
	var resultName string
	var matrixLength int
	replacements := map[string]string{
		"context.pipelineTask.retries": strconv.Itoa(pt.Retries),
	}
	// Only params that reference matrix context variables need the extra
	// tasks.<name>.matrix[...].length replacements computed below.
	filteredParams := filterMatrixContextVar(pt.Params)
	for _, p := range filteredParams {
		pipelineTaskName, resultName = p.ParseTaskandResultName()
		// find the referenced pipelineTask to count the matrix combinations
		if pipelineTaskName != "" && pipelineRunStatus.PipelineSpec != nil {
			for _, task := range pipelineRunStatus.PipelineSpec.Tasks {
				if task.Name == pipelineTaskName {
					matrixLength = task.Matrix.CountCombinations()
					replacements["tasks."+pipelineTaskName+".matrix.length"] = strconv.Itoa(matrixLength)
					// NOTE(review): this `continue` targets the inner loop, so the
					// remaining tasks are still scanned after the match; `break`
					// looks intended — confirm before changing.
					continue
				}
			}
		}
		// find the resultName from the ResultsCache
		if pipelineTaskName != "" && resultName != "" {
			// NOTE: the loop variable `pt` shadows the outer PipelineTask here.
			for _, pt := range facts.State {
				if pt.PipelineTask.Name == pipelineTaskName {
					if len(pt.ResultsCache) == 0 {
						// Lazily build the cache of matrixed TaskRun results.
						pt.ResultsCache = createResultsCacheMatrixedTaskRuns(pt)
					}
					resultLength := len(pt.ResultsCache[resultName])
					replacements["tasks."+pipelineTaskName+".matrix."+resultName+".length"] = strconv.Itoa(resultLength)
					// NOTE(review): same as above — `continue` rather than `break`.
					continue
				}
			}
		}
	}
	// Substitute into params, matrix params/includes, and the display name.
	pt.Params = pt.Params.ReplaceVariables(replacements, map[string][]string{}, map[string]map[string]string{})
	if pt.IsMatrixed() {
		pt.Matrix.Params = pt.Matrix.Params.ReplaceVariables(replacements, map[string][]string{}, map[string]map[string]string{})
		for i := range pt.Matrix.Include {
			pt.Matrix.Include[i].Params = pt.Matrix.Include[i].Params.ReplaceVariables(replacements, map[string][]string{}, map[string]map[string]string{})
		}
	}
	pt.DisplayName = substitution.ApplyReplacements(pt.DisplayName, replacements)
	return pt
}
// ApplyTaskResults applies the ResolvedResultRef to each PipelineTask.Params and Pipeline.When in targets
func ApplyTaskResults(targets PipelineRunState, resolvedResultRefs ResolvedResultRefs) {
	stringReplacements := resolvedResultRefs.getStringReplacements()
	arrayReplacements := resolvedResultRefs.getArrayReplacements()
	objectReplacements := resolvedResultRefs.getObjectReplacements()
	for _, rpt := range targets {
		if rpt.PipelineTask == nil {
			continue
		}
		// Substitute into a copy and swap it in, so the original task is never
		// mutated in place.
		task := rpt.PipelineTask.DeepCopy()
		task.Params = task.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
		if task.IsMatrixed() {
			// Matrixed pipeline results replacements support:
			// 1. String replacements from string, array or object results
			// 2. array replacements from array results are supported
			task.Matrix.Params = task.Matrix.Params.ReplaceVariables(stringReplacements, arrayReplacements, nil)
			for i := range task.Matrix.Include {
				// matrix include parameters can only be type string
				task.Matrix.Include[i].Params = task.Matrix.Include[i].Params.ReplaceVariables(stringReplacements, nil, nil)
			}
		}
		task.When = task.When.ReplaceVariables(stringReplacements, arrayReplacements)
		if task.TaskRef != nil {
			if task.TaskRef.Params != nil {
				task.TaskRef.Params = task.TaskRef.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
			}
			task.TaskRef.Name = substitution.ApplyReplacements(task.TaskRef.Name, stringReplacements)
		}
		task.DisplayName = substitution.ApplyReplacements(task.DisplayName, stringReplacements)
		for i, ws := range task.Workspaces {
			task.Workspaces[i].SubPath = substitution.ApplyReplacements(ws.SubPath, stringReplacements)
		}
		rpt.PipelineTask = task
	}
}
// ApplyPipelineTaskStateContext replaces context variables referring to execution status with the specified status
func ApplyPipelineTaskStateContext(state PipelineRunState, replacements map[string]string) {
	for _, rpt := range state {
		if rpt.PipelineTask == nil {
			continue
		}
		// Substitute into a copy and swap it in, leaving the original untouched.
		task := rpt.PipelineTask.DeepCopy()
		task.Params = task.Params.ReplaceVariables(replacements, nil, nil)
		task.When = task.When.ReplaceVariables(replacements, nil)
		if task.TaskRef != nil {
			if task.TaskRef.Params != nil {
				task.TaskRef.Params = task.TaskRef.Params.ReplaceVariables(replacements, nil, nil)
			}
			task.TaskRef.Name = substitution.ApplyReplacements(task.TaskRef.Name, replacements)
		}
		task.DisplayName = substitution.ApplyReplacements(task.DisplayName, replacements)
		rpt.PipelineTask = task
	}
}
// ApplyWorkspaces replaces workspace variables in the given pipeline spec with their
// concrete values.
func ApplyWorkspaces(p *v1.PipelineSpec, pr *v1.PipelineRun) *v1.PipelineSpec {
	p = p.DeepCopy()
	bindings := map[string]string{}
	// Every declared workspace starts out unbound...
	for _, declared := range p.Workspaces {
		bindings[fmt.Sprintf("workspaces.%s.bound", declared.Name)] = "false"
	}
	// ...and flips to bound when the PipelineRun supplies a binding for it.
	for _, bound := range pr.Spec.Workspaces {
		bindings[fmt.Sprintf("workspaces.%s.bound", bound.Name)] = "true"
	}
	return ApplyReplacements(p, bindings, map[string][]string{}, map[string]map[string]string{})
}
// replaceVariablesInPipelineTasks handles variable replacement for a slice of PipelineTasks in-place
func replaceVariablesInPipelineTasks(tasks []v1.PipelineTask, replacements map[string]string,
	arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) {
	for i := range tasks {
		tasks[i].Params = tasks[i].Params.ReplaceVariables(replacements, arrayReplacements, objectReplacements)
		if tasks[i].IsMatrixed() {
			// Matrix params may take array replacements; include params are string-only.
			tasks[i].Matrix.Params = tasks[i].Matrix.Params.ReplaceVariables(replacements, arrayReplacements, nil)
			for j := range tasks[i].Matrix.Include {
				tasks[i].Matrix.Include[j].Params = tasks[i].Matrix.Include[j].Params.ReplaceVariables(replacements, nil, nil)
			}
		} else {
			// NOTE(review): DisplayName is substituted only for non-matrixed
			// tasks here, whereas ApplyTaskResults substitutes it
			// unconditionally — confirm this asymmetry is intentional.
			tasks[i].DisplayName = substitution.ApplyReplacements(tasks[i].DisplayName, replacements)
		}
		for j := range tasks[i].Workspaces {
			tasks[i].Workspaces[j].SubPath = substitution.ApplyReplacements(tasks[i].Workspaces[j].SubPath, replacements)
		}
		tasks[i].When = tasks[i].When.ReplaceVariables(replacements, arrayReplacements)
		if tasks[i].TaskRef != nil {
			if tasks[i].TaskRef.Params != nil {
				tasks[i].TaskRef.Params = tasks[i].TaskRef.Params.ReplaceVariables(replacements, arrayReplacements, objectReplacements)
			}
			tasks[i].TaskRef.Name = substitution.ApplyReplacements(tasks[i].TaskRef.Name, replacements)
		}
		// OnError may itself contain a parameter reference, hence the round-trip
		// through string substitution.
		tasks[i].OnError = v1.PipelineTaskOnErrorType(substitution.ApplyReplacements(string(tasks[i].OnError), replacements))
		tasks[i] = propagateParams(tasks[i], replacements, arrayReplacements, objectReplacements)
	}
}
// ApplyReplacements replaces placeholders for declared parameters with the specified replacements.
func ApplyReplacements(p *v1.PipelineSpec, replacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) *v1.PipelineSpec {
	// Operate on a deep copy so the caller's spec is never mutated.
	p = p.DeepCopy()
	// Apply the same substitution pass to regular and finally tasks.
	for _, taskList := range [][]v1.PipelineTask{p.Tasks, p.Finally} {
		replaceVariablesInPipelineTasks(taskList, replacements, arrayReplacements, objectReplacements)
	}
	return p
}
// propagateParams returns a Pipeline Task spec that is the same as the input Pipeline Task spec, but with
// all parameter replacements from `stringReplacements`, `arrayReplacements`, and `objectReplacements` substituted.
// It does not modify `stringReplacements`, `arrayReplacements`, or `objectReplacements`.
func propagateParams(t v1.PipelineTask, stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) v1.PipelineTask {
	if t.TaskSpec == nil {
		return t
	}
	if len(t.Params) == 0 {
		// No task-level params: the pipeline-level replacements apply as-is.
		t.TaskSpec.TaskSpec = *resources.ApplyReplacements(&t.TaskSpec.TaskSpec, stringReplacements, arrayReplacements, objectReplacements)
		return t
	}
	// Task-level params shadow pipeline-level ones, so work on copies of the
	// replacement tables to honor the "does not modify" contract above.
	// maps.Copy into pre-sized maps replaces the previous hand-rolled loops.
	stringReplacementsDup := make(map[string]string, len(stringReplacements))
	maps.Copy(stringReplacementsDup, stringReplacements)
	arrayReplacementsDup := make(map[string][]string, len(arrayReplacements))
	maps.Copy(arrayReplacementsDup, arrayReplacements)
	objectReplacementsDup := make(map[string]map[string]string, len(objectReplacements))
	maps.Copy(objectReplacementsDup, objectReplacements)
	for _, par := range t.Params {
		for _, pattern := range paramPatterns {
			checkName := fmt.Sprintf(pattern, par.Name)
			// Scoping: a task param only overrides an entry that already exists
			// at pipeline level (hence the presence checks).
			if _, ok := stringReplacementsDup[checkName]; ok {
				stringReplacementsDup[checkName] = par.Value.StringVal
			}
			if _, ok := arrayReplacementsDup[checkName]; ok {
				arrayReplacementsDup[checkName] = par.Value.ArrayVal
			}
			if _, ok := objectReplacementsDup[checkName]; ok {
				objectReplacementsDup[checkName] = par.Value.ObjectVal
				// Individual object keys become string replacements too.
				for k, v := range par.Value.ObjectVal {
					stringReplacementsDup[fmt.Sprintf(objectIndividualVariablePattern, par.Name, k)] = v
				}
			}
		}
	}
	t.TaskSpec.TaskSpec = *resources.ApplyReplacements(&t.TaskSpec.TaskSpec, stringReplacementsDup, arrayReplacementsDup, objectReplacementsDup)
	return t
}
// ApplyResultsToWorkspaceBindings applies results from TaskRuns to WorkspaceBindings in a PipelineRun. It replaces placeholders in
// various binding types with values from TaskRun results.
func ApplyResultsToWorkspaceBindings(trResults map[string][]v1.TaskRunResult, pr *v1.PipelineRun) {
	replacements := map[string]string{}
	for taskName, taskResults := range trResults {
		for _, result := range taskResults {
			switch result.Type {
			case v1.ResultsTypeString:
				replacements[fmt.Sprintf("tasks.%s.results.%s", taskName, result.Name)] = result.Value.StringVal
			case v1.ResultsTypeObject:
				// Object results are only addressable key-by-key in bindings.
				for key, val := range result.Value.ObjectVal {
					replacements[fmt.Sprintf("tasks.%s.results.%s.%s", taskName, result.Name, key)] = val
				}
			case v1.ResultsTypeArray:
				// Array results are not usable in workspace bindings.
			}
		}
	}
	pr.Spec.Workspaces = workspace.ReplaceWorkspaceBindingsVars(pr.Spec.Workspaces, replacements)
}
// PropagateResults propagates the results of completed tasks into an
// unfinished task's embedded TaskSpec when they were not explicitly passed
// via params.
func PropagateResults(rpt *ResolvedPipelineTask, runStates PipelineRunState) {
	if rpt.ResolvedTask == nil || rpt.ResolvedTask.TaskSpec == nil {
		return
	}
	stringReplacements := map[string]string{}
	arrayReplacements := map[string][]string{}
	for taskName, taskResults := range runStates.GetTaskRunsResults() {
		for _, res := range taskResults {
			key := fmt.Sprintf("tasks.%s.results.%s", taskName, res.Name)
			switch res.Type {
			case v1.ResultsTypeString:
				stringReplacements[key] = res.Value.StringVal
			case v1.ResultsTypeArray:
				arrayReplacements[key] = res.Value.ArrayVal
			case v1.ResultsTypeObject:
				// Objects expand to one string replacement per key.
				for k, v := range res.Value.ObjectVal {
					stringReplacements[fmt.Sprintf("%s.%s", key, k)] = v
				}
			}
		}
	}
	rpt.ResolvedTask.TaskSpec = resources.ApplyReplacements(rpt.ResolvedTask.TaskSpec, stringReplacements, arrayReplacements, map[string]map[string]string{})
}
// PropagateArtifacts propagates artifact values from previous task runs into the TaskSpec of the current task.
func PropagateArtifacts(rpt *ResolvedPipelineTask, runStates PipelineRunState) error {
	if rpt.ResolvedTask == nil || rpt.ResolvedTask.TaskSpec == nil {
		return nil
	}
	replacements := map[string]string{}
	for taskName, artifacts := range runStates.GetTaskRunsArtifacts() {
		if artifacts == nil {
			continue
		}
		for i, input := range artifacts.Inputs {
			serialized, err := json.Marshal(input.Values)
			if err != nil {
				return err
			}
			replacements[fmt.Sprintf("tasks.%s.inputs.%s", taskName, input.Name)] = string(serialized)
			// The first input also backs the bare "inputs" reference.
			if i == 0 {
				replacements[fmt.Sprintf("tasks.%s.inputs", taskName)] = string(serialized)
			}
		}
		for i, output := range artifacts.Outputs {
			serialized, err := json.Marshal(output.Values)
			if err != nil {
				return err
			}
			replacements[fmt.Sprintf("tasks.%s.outputs.%s", taskName, output.Name)] = string(serialized)
			// The first output also backs the bare "outputs" reference.
			if i == 0 {
				replacements[fmt.Sprintf("tasks.%s.outputs", taskName)] = string(serialized)
			}
		}
	}
	rpt.ResolvedTask.TaskSpec = resources.ApplyReplacements(rpt.ResolvedTask.TaskSpec, replacements, map[string][]string{}, map[string]map[string]string{})
	return nil
}
// ApplyTaskResultsToPipelineResults applies the results of completed TasksRuns and Runs to a Pipeline's
// list of PipelineResults, returning the computed set of PipelineRunResults. References to
// non-existent TaskResults or failed TaskRuns or Runs result in a PipelineResult being considered invalid
// and omitted from the returned slice. A nil slice is returned if no results are passed in or all
// results are invalid.
func ApplyTaskResultsToPipelineResults(
	_ context.Context,
	results []v1.PipelineResult,
	taskRunResults map[string][]v1.TaskRunResult,
	customTaskResults map[string][]v1beta1.CustomRunResult,
	taskstatus map[string]string,
) ([]v1.PipelineRunResult, error) {
	var runResults []v1.PipelineRunResult
	var invalidPipelineResults []string
	// The replacement maps are shared across all pipeline results and double as
	// memoization caches: a variable resolved once is never resolved again.
	stringReplacements := map[string]string{}
	arrayReplacements := map[string][]string{}
	objectReplacements := map[string]map[string]string{}
	for _, pipelineResult := range results {
		variablesInPipelineResult, _ := pipelineResult.GetVarSubstitutionExpressions()
		// Results that reference no variables are omitted from the output entirely.
		if len(variablesInPipelineResult) == 0 {
			continue
		}
		validPipelineResult := true
		for _, variable := range variablesInPipelineResult {
			// Skip variables already resolved while processing an earlier result.
			if _, isMemoized := stringReplacements[variable]; isMemoized {
				continue
			}
			if _, isMemoized := arrayReplacements[variable]; isMemoized {
				continue
			}
			if _, isMemoized := objectReplacements[variable]; isMemoized {
				continue
			}
			variableParts := strings.Split(variable, ".")
			// Only `tasks.<name>.results.*` / `finally.<name>.results.*` references are valid.
			if (variableParts[0] != v1.ResultTaskPart && variableParts[0] != v1.ResultFinallyPart) || variableParts[2] != v1beta1.ResultResultPart {
				validPipelineResult = false
				invalidPipelineResults = append(invalidPipelineResults, pipelineResult.Name)
				continue
			}
			switch len(variableParts) {
			// For string result: tasks.<taskName>.results.<stringResultName>
			// For array result: tasks.<taskName>.results.<arrayResultName>[*], tasks.<taskName>.results.<arrayResultName>[i]
			// For object result: tasks.<taskName>.results.<objectResultName>[*],
			case resultsParseNumber:
				taskName, resultName := variableParts[1], variableParts[3]
				resultName, stringIdx := v1.ParseResultName(resultName)
				if resultValue := taskResultValue(taskName, resultName, taskRunResults); resultValue != nil {
					switch resultValue.Type {
					case v1.ParamTypeString:
						stringReplacements[variable] = resultValue.StringVal
					case v1.ParamTypeArray:
						if stringIdx != "*" {
							// A concrete index into an array result.
							intIdx, _ := strconv.Atoi(stringIdx)
							if intIdx < len(resultValue.ArrayVal) {
								stringReplacements[variable] = resultValue.ArrayVal[intIdx]
							} else {
								// referred array index out of bound
								invalidPipelineResults = append(invalidPipelineResults, pipelineResult.Name)
								validPipelineResult = false
							}
						} else {
							// `[*]` expands to the whole array.
							arrayReplacements[substitution.StripStarVarSubExpression(variable)] = resultValue.ArrayVal
						}
					case v1.ParamTypeObject:
						objectReplacements[substitution.StripStarVarSubExpression(variable)] = resultValue.ObjectVal
					}
				} else if resultValue := runResultValue(taskName, resultName, customTaskResults); resultValue != nil {
					// Custom task (Run) results are always strings.
					stringReplacements[variable] = *resultValue
				} else {
					// if the task is not successful (e.g. skipped or failed) and the results is missing, don't return error
					if status, ok := taskstatus[PipelineTaskStatusPrefix+taskName+PipelineTaskStatusSuffix]; ok {
						if status != v1.TaskRunReasonSuccessful.String() {
							validPipelineResult = false
							continue
						}
					}
					// referred result name is not existent
					invalidPipelineResults = append(invalidPipelineResults, pipelineResult.Name)
					validPipelineResult = false
				}
			// For object type result: tasks.<taskName>.results.<objectResultName>.<individualAttribute>
			case objectElementResultsParseNumber:
				taskName, resultName, objectKey := variableParts[1], variableParts[3], variableParts[4]
				resultName, _ = v1.ParseResultName(resultName)
				if resultValue := taskResultValue(taskName, resultName, taskRunResults); resultValue != nil {
					if _, ok := resultValue.ObjectVal[objectKey]; ok {
						stringReplacements[variable] = resultValue.ObjectVal[objectKey]
					} else {
						// referred object key is not existent
						invalidPipelineResults = append(invalidPipelineResults, pipelineResult.Name)
						validPipelineResult = false
					}
				} else {
					// if the task is not successful (e.g. skipped or failed) and the results is missing, don't return error
					if status, ok := taskstatus[PipelineTaskStatusPrefix+taskName+PipelineTaskStatusSuffix]; ok {
						if status != v1.TaskRunReasonSuccessful.String() {
							validPipelineResult = false
							continue
						}
					}
					// referred result name is not existent
					invalidPipelineResults = append(invalidPipelineResults, pipelineResult.Name)
					validPipelineResult = false
				}
			default:
				// Unexpected number of dot-separated segments in the reference.
				invalidPipelineResults = append(invalidPipelineResults, pipelineResult.Name)
				validPipelineResult = false
			}
		}
		if validPipelineResult {
			// Copy the value before substituting so the input slice is not mutated.
			finalValue := pipelineResult.Value
			finalValue.ApplyReplacements(stringReplacements, arrayReplacements, objectReplacements)
			runResults = append(runResults, v1.PipelineRunResult{
				Name:  pipelineResult.Name,
				Value: finalValue,
			})
		}
	}
	// Valid results are still returned alongside the error describing the invalid ones.
	if len(invalidPipelineResults) > 0 {
		return runResults, fmt.Errorf("invalid pipelineresults %v, the referenced results don't exist", invalidPipelineResults)
	}
	return runResults, nil
}
// taskResultValue looks up the value of result resultName produced by pipeline
// task taskName. It returns nil when the task has no entry in the map or none
// of its results matches resultName.
func taskResultValue(taskName string, resultName string, taskResults map[string][]v1.TaskRunResult) *v1.ResultValue {
	for _, candidate := range taskResults[taskName] {
		if candidate.Name == resultName {
			value := candidate.Value
			return &value
		}
	}
	return nil
}
// runResultValue looks up the value of result resultName produced by the
// custom-task Run for pipeline task taskName. It returns nil when the task has
// no entry in the map or none of its results matches resultName.
func runResultValue(taskName string, resultName string, runResults map[string][]v1beta1.CustomRunResult) *string {
	for _, candidate := range runResults[taskName] {
		if candidate.Name == resultName {
			value := candidate.Value
			return &value
		}
	}
	return nil
}
// ApplyParametersToWorkspaceBindings substitutes parameter references (from the
// PipelineSpec defaults and the PipelineRun's params) into the PipelineRun's
// workspace bindings. Only string-typed replacements apply to bindings.
func ApplyParametersToWorkspaceBindings(ctx context.Context, pr *v1.PipelineRun) {
	stringReplacements, _, _ := paramsFromPipelineRun(ctx, pr)
	pr.Spec.Workspaces = workspace.ReplaceWorkspaceBindingsVars(pr.Spec.Workspaces, stringReplacements)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"context"
"errors"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
resolutionV1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
rprp "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinespec"
"github.com/tektoncd/pipeline/pkg/remote"
"github.com/tektoncd/pipeline/pkg/remoteresolution/remote/resolution"
remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/substitution"
"github.com/tektoncd/pipeline/pkg/trustedresources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
)
// GetPipelineFunc is a factory function that will use the given PipelineRef to return a valid GetPipeline function that
// looks up the pipeline. It uses as context a k8s client, tekton client, namespace, and service account name to return
// the pipeline. It knows whether it needs to look in the cluster or in a remote location to fetch the reference.
// OCI bundle and remote resolution pipelines will be verified by trusted resources if the feature is enabled
func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, requester remoteresource.Requester, pipelineRun *v1.PipelineRun, verificationPolicies []*v1alpha1.VerificationPolicy) rprp.GetPipeline {
	pr := pipelineRun.Spec.PipelineRef
	namespace := pipelineRun.Namespace
	// if the spec is already in the status, do not try to fetch it again, just use it as source of truth.
	// Same for the RefSource field in the Status.Provenance.
	if pipelineRun.Status.PipelineSpec != nil {
		return func(_ context.Context, name string) (*v1.Pipeline, *v1.RefSource, *trustedresources.VerificationResult, error) {
			var refSource *v1.RefSource
			if pipelineRun.Status.Provenance != nil {
				refSource = pipelineRun.Status.Provenance.RefSource
			}
			// Rehydrate a Pipeline object from the stored spec; no verification
			// result is produced on this path.
			return &v1.Pipeline{
				ObjectMeta: metav1.ObjectMeta{
					Name:      name,
					Namespace: namespace,
				},
				Spec: *pipelineRun.Status.PipelineSpec,
			}, refSource, nil, nil
		}
	}
	switch {
	case pr != nil && pr.Resolver != "" && requester != nil:
		// Remote resolution: build a resolver whose params/URL have all
		// parameter and context variables substituted.
		return func(ctx context.Context, name string) (*v1.Pipeline, *v1.RefSource, *trustedresources.VerificationResult, error) {
			stringReplacements, arrayReplacements, objectReplacements := paramsFromPipelineRun(ctx, pipelineRun)
			for k, v := range GetContextReplacements("", pipelineRun) {
				stringReplacements[k] = v
			}
			replacedParams := pr.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
			var url string
			// The name is url-like so it's not a local reference.
			if err := v1.RefNameLikeUrl(pr.Name); err == nil {
				// apply variable replacements in the name.
				// NOTE(review): this writes the substituted value back into the
				// shared PipelineRef — confirm the in-place mutation is intended.
				pr.Name = substitution.ApplyReplacements(pr.Name, stringReplacements)
				url = pr.Name
			}
			resolverPayload := remoteresource.ResolverPayload{
				ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
					Params: replacedParams,
					URL:    url,
				},
			}
			resolver := resolution.NewResolver(requester, pipelineRun, string(pr.Resolver), resolverPayload)
			return resolvePipeline(ctx, resolver, name, namespace, k8s, tekton, verificationPolicies)
		}
	default:
		// Even if there is no pipeline ref, we should try to return a local resolver.
		local := &LocalPipelineRefResolver{
			Namespace:    namespace,
			Tektonclient: tekton,
		}
		return local.GetPipeline
	}
}
// LocalPipelineRefResolver uses the current cluster to resolve a pipeline reference.
type LocalPipelineRefResolver struct {
	// Namespace to look the pipeline up in; required (GetPipeline errors when empty).
	Namespace string
	// Tektonclient is the versioned Tekton client used for the lookup.
	Tektonclient clientset.Interface
}
// GetPipeline resolves a Pipeline by name from the local cluster using the
// configured versioned Tekton client. It returns an error if it can't find an
// appropriate Pipeline for any reason; the RefSource and VerificationResult
// returns are always nil for local resolution.
// TODO: if we want to set RefSource for in-cluster pipeline, set it here.
// https://github.com/tektoncd/pipeline/issues/5522
// TODO(#6666): Support local resources verification
func (l *LocalPipelineRefResolver) GetPipeline(ctx context.Context, name string) (*v1.Pipeline, *v1.RefSource, *trustedresources.VerificationResult, error) {
	// A namespace is mandatory for a cluster-local lookup.
	if l.Namespace == "" {
		return nil, nil, nil, fmt.Errorf("must specify namespace to resolve reference to pipeline %s", name)
	}
	pipeline, err := l.Tektonclient.TektonV1().Pipelines(l.Namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, nil, fmt.Errorf("tekton client cannot get pipeline %s from local cluster: %w", name, err)
	}
	return pipeline, nil, nil, nil
}
// resolvePipeline uses the given remote.Resolver to fetch the pipeline called
// name and, when trusted resources is enabled, verifies it. An error is
// returned if the resolver fails or the returned data isn't a valid
// *v1.Pipeline; the returned VerificationResult carries the verification
// outcome (result type and err) when the feature is enabled.
func resolvePipeline(ctx context.Context, resolver remote.Resolver, name string, namespace string, k8s kubernetes.Interface, tekton clientset.Interface, verificationPolicies []*v1alpha1.VerificationPolicy) (*v1.Pipeline, *v1.RefSource, *trustedresources.VerificationResult, error) {
	obj, refSource, err := resolver.Get(ctx, "pipeline", name)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("resolver failed to get Pipeline %s: %w", name, err)
	}
	pipelineObj, vr, err := readRuntimeObjectAsPipeline(ctx, namespace, obj, k8s, tekton, refSource, verificationPolicies)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("failed to read runtime object as Pipeline: %w", err)
	}
	return pipelineObj, refSource, vr, nil
}
// readRuntimeObjectAsPipeline tries to convert a generic runtime.Object
// into a *v1.Pipeline type so that its meta and spec fields
// can be read. v1 object will be converted to v1beta1 and returned.
// v1beta1 Pipeline will be verified if trusted resources is enabled
// A VerificationResult is returned if trusted resources is enabled, VerificationResult contains the result type and err.
// An error is returned if the given object is not a
// PipelineObject or if there is an error validating or upgrading an
// older PipelineObject into its v1beta1 equivalent.
// TODO(#5541): convert v1beta1 obj to v1 once we use v1 as the stored version
func readRuntimeObjectAsPipeline(ctx context.Context, namespace string, obj runtime.Object, k8s kubernetes.Interface, tekton clientset.Interface, refSource *v1.RefSource, verificationPolicies []*v1alpha1.VerificationPolicy) (*v1.Pipeline, *trustedresources.VerificationResult, error) {
	switch obj := obj.(type) {
	case *v1beta1.Pipeline:
		obj.SetDefaults(ctx)
		// Cleanup object from things we don't care about
		// FIXME: extract this in a function
		obj.ObjectMeta.OwnerReferences = nil
		// Verify the Pipeline once we fetch from the remote resolution, mutating, validation and conversion of the pipeline should happen after the verification, since signatures are based on the remote pipeline contents
		vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
		// Issue a dry-run request to create the remote Pipeline, so that it can undergo validation from validating admission webhooks
		// and mutation from mutating admission webhooks without actually creating the Pipeline on the cluster
		o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
		if err != nil {
			return nil, nil, err
		}
		if mutatedPipeline, ok := o.(*v1beta1.Pipeline); ok {
			// Keep the original (pre-dry-run) metadata on the mutated object.
			mutatedPipeline.ObjectMeta = obj.ObjectMeta
			p := &v1.Pipeline{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Pipeline",
					APIVersion: "tekton.dev/v1",
				},
			}
			if err := mutatedPipeline.ConvertTo(ctx, p); err != nil {
				return nil, nil, fmt.Errorf("failed to convert v1beta1 obj %s into v1 Pipeline", mutatedPipeline.GetObjectKind().GroupVersionKind().String())
			}
			return p, &vr, nil
		}
	case *v1.Pipeline:
		// Cleanup object from things we don't care about
		// FIXME: extract this in a function
		obj.ObjectMeta.OwnerReferences = nil
		// This SetDefaults is currently not necessary, but for consistency, it is recommended to add it.
		// Avoid forgetting to add it in the future when there is a v2 version, causing similar problems.
		obj.SetDefaults(ctx)
		// As above: verify before dry-run validation/mutation, since signatures
		// are computed over the remote pipeline contents.
		vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
		o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
		if err != nil {
			return nil, nil, err
		}
		if mutatedPipeline, ok := o.(*v1.Pipeline); ok {
			mutatedPipeline.ObjectMeta = obj.ObjectMeta
			return mutatedPipeline, &vr, nil
		}
	}
	// Reached for unknown input types, or when the dry-run returned an unexpected type.
	return nil, nil, errors.New("resource is not a pipeline")
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"github.com/google/cel-go/cel"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
"github.com/tektoncd/pipeline/pkg/remote"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/substitution"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmeta"
)
const (
	// ReasonConditionCheckFailed indicates that the reason for the failure status is that the
	// condition check associated to the pipeline task evaluated to false
	ReasonConditionCheckFailed = "ConditionCheckFailed"
)
// TaskSkipStatus stores whether a task was skipped and why
type TaskSkipStatus struct {
	// IsSkipped is true when the task will not be run.
	IsSkipped bool
	// SkippingReason records why the task was skipped (v1.None when it was not).
	SkippingReason v1.SkippingReason
}
// TaskNotFoundError indicates that the resolution failed because a referenced Task couldn't be retrieved
type TaskNotFoundError struct {
	// Name of the Task that could not be found.
	Name string
	// Err is the underlying retrieval error.
	Err error
}

// Error formats the error with the task name quoted and the cause appended.
func (e *TaskNotFoundError) Error() string {
	return fmt.Sprintf("Couldn't retrieve Task %q: %s", e.Name, e.Err.Error())
}

// Unwrap exposes the underlying error to errors.Is / errors.As.
func (e *TaskNotFoundError) Unwrap() error {
	return e.Err
}
// ResolvedPipelineTask contains a PipelineTask and its associated child PipelineRun(s) (Pipelines-in-Pipelines), TaskRun(s) or CustomRuns, if they exist.
type ResolvedPipelineTask struct {
	// Names/objects of the child PipelineRuns created for this task (Pipelines-in-Pipelines).
	ChildPipelineRunNames []string
	ChildPipelineRuns     []*v1.PipelineRun
	ResolvedPipeline      ResolvedPipeline
	// Names/objects of the TaskRuns created for this task (more than one when fanned out, e.g. by a Matrix).
	TaskRunNames []string
	TaskRuns     []*v1.TaskRun
	// ResolvedTask holds the resolved task definition; may be nil (callers guard against it).
	ResolvedTask *resources.ResolvedTask
	// If the PipelineTask is a Custom Task, CustomRunName and CustomRun will be set.
	CustomTask     bool
	CustomRunNames []string
	CustomRuns     []*v1beta1.CustomRun
	// PipelineTask is this task's definition from the Pipeline.
	PipelineTask *v1.PipelineTask
	// ResultsCache — presumably caches result values per result name; confirm against callers.
	ResultsCache map[string][]string
	// EvaluatedCEL is used to store the results of evaluated CEL expression
	EvaluatedCEL map[string]bool
}
// EvaluateCEL compiles and evaluates every CEL `when` expression on the
// PipelineTask and stores each boolean outcome in EvaluatedCEL, keyed by the
// expression text. It returns an error if an expression fails to compile,
// fails to evaluate, or does not produce a boolean.
func (t *ResolvedPipelineTask) EvaluateCEL() error {
	if t.PipelineTask != nil {
		// Each call to this function will reset this field to prevent additional CELs.
		t.EvaluatedCEL = make(map[string]bool)
		for _, we := range t.PipelineTask.When {
			if we.CEL == "" {
				continue
			}
			if _, ok := t.EvaluatedCEL[we.CEL]; ok {
				// Duplicate expression within the same When list: reuse the cached result.
				continue
			}
			// Create a program environment configured with the standard library of CEL functions and macros
			// The error is omitted because no environment declarations are passed in.
			env, _ := cel.NewEnv()
			// Parse and Check the CEL to get the Abstract Syntax Tree
			ast, iss := env.Compile(we.CEL)
			if iss.Err() != nil {
				return iss.Err()
			}
			// Generate an evaluatable instance of the Ast within the environment
			prg, err := env.Program(ast)
			if err != nil {
				return err
			}
			// Evaluate the CEL expression with no input variables.
			out, _, err := prg.Eval(map[string]interface{}{})
			if err != nil {
				return err
			}
			b, ok := out.Value().(bool)
			if !ok {
				// Lowercase per Go error-string convention (was capitalized).
				return fmt.Errorf("the CEL expression %s is not evaluated to a boolean", we.CEL)
			}
			t.EvaluatedCEL[we.CEL] = b
		}
	}
	return nil
}
// isDone reports whether the task reached a terminal state: skipped,
// succeeded, failed, or failed during validation.
func (t ResolvedPipelineTask) isDone(facts *PipelineRunFacts) bool {
	if t.Skip(facts).IsSkipped {
		return true
	}
	return t.isSuccessful() || t.isFailure() || t.isValidationFailed(facts.ValidationFailedTask)
}
// IsRunning reports whether the task has at least one run scheduled and has
// not yet succeeded or failed.
func (t ResolvedPipelineTask) IsRunning() bool {
	scheduled := len(t.TaskRuns) > 0
	if t.IsCustomTask() {
		scheduled = len(t.CustomRuns) > 0
	}
	if !scheduled {
		return false
	}
	return !t.isSuccessful() && !t.isFailure()
}
// IsCustomTask returns true if the PipelineTask references a Custom Task
// (i.e. the CustomTask flag was set when this task was resolved).
func (t ResolvedPipelineTask) IsCustomTask() bool {
	return t.CustomTask
}
// IsChildPipeline returns true if the PipelineTask references a child Pipeline
// (Pipelines-in-Pipelines), i.e. it carries an inline PipelineSpec.
func (t ResolvedPipelineTask) IsChildPipeline() bool {
	return t.PipelineTask.PipelineSpec != nil
}
// getReason returns the latest reason if the run has completed successfully
// If the PipelineTask has a Matrix, getReason returns the failure reason for any failure
// otherwise, it returns an empty string
func (t ResolvedPipelineTask) getReason() string {
	if t.IsChildPipeline() {
		if len(t.ChildPipelineRuns) == 0 {
			return ""
		}
		// Prefer the reason of the first non-successful child PipelineRun.
		for _, childPipelineRun := range t.ChildPipelineRuns {
			if !childPipelineRun.IsSuccessful() && len(childPipelineRun.Status.Conditions) >= 1 {
				return childPipelineRun.Status.Conditions[0].Reason
			}
		}
		// Otherwise report the first child's condition reason.
		if len(t.ChildPipelineRuns) >= 1 && len(t.ChildPipelineRuns[0].Status.Conditions) >= 1 {
			return t.ChildPipelineRuns[0].Status.Conditions[0].Reason
		}
		// NOTE(review): no return here — when no child has conditions yet,
		// control falls through to the CustomRun/TaskRun checks below.
	}
	if t.IsCustomTask() {
		if len(t.CustomRuns) == 0 {
			return ""
		}
		// Prefer the reason of the first non-successful CustomRun.
		for _, run := range t.CustomRuns {
			if !run.IsSuccessful() && len(run.Status.Conditions) >= 1 {
				return run.Status.Conditions[0].Reason
			}
		}
		if len(t.CustomRuns) >= 1 && len(t.CustomRuns[0].Status.Conditions) >= 1 {
			return t.CustomRuns[0].Status.Conditions[0].Reason
		}
	}
	if len(t.TaskRuns) == 0 {
		return ""
	}
	// Prefer the reason of the first non-successful TaskRun.
	for _, taskRun := range t.TaskRuns {
		if !taskRun.IsSuccessful() && len(taskRun.Status.Conditions) >= 1 {
			return taskRun.Status.Conditions[0].Reason
		}
	}
	if len(t.TaskRuns) >= 1 && len(t.TaskRuns[0].Status.Conditions) >= 1 {
		return t.TaskRuns[0].Status.Conditions[0].Reason
	}
	return ""
}
// isSuccessful returns true only if every run owned by this task completed
// successfully; with a Matrix that means all fanned-out runs succeeded. A task
// with no runs at all is not successful.
func (t ResolvedPipelineTask) isSuccessful() bool {
	switch {
	case t.IsChildPipeline():
		if len(t.ChildPipelineRuns) == 0 {
			return false
		}
		for _, childPipelineRun := range t.ChildPipelineRuns {
			if !childPipelineRun.IsSuccessful() {
				return false
			}
		}
	case t.IsCustomTask():
		if len(t.CustomRuns) == 0 {
			return false
		}
		for _, customRun := range t.CustomRuns {
			if !customRun.IsSuccessful() {
				return false
			}
		}
	default:
		if len(t.TaskRuns) == 0 {
			return false
		}
		for _, taskRun := range t.TaskRuns {
			if !taskRun.IsSuccessful() {
				return false
			}
		}
	}
	return true
}
// isFailure returns true only if the run has failed (ConditionSucceeded =
// False). If the PipelineTask has a Matrix, isFailure returns true if any run
// has failed and all other runs are done.
func (t ResolvedPipelineTask) isFailure() bool {
	switch {
	case t.IsChildPipeline():
		if len(t.ChildPipelineRuns) == 0 {
			return false
		}
		for _, childPipelineRun := range t.ChildPipelineRuns {
			if !childPipelineRun.IsDone() {
				return false
			}
		}
		return t.haveAnyChildPipelineRunsFailed()
	case t.IsCustomTask():
		if len(t.CustomRuns) == 0 {
			return false
		}
		for _, run := range t.CustomRuns {
			if !run.IsDone() {
				return false
			}
		}
		return t.haveAnyRunsFailed()
	default:
		if len(t.TaskRuns) == 0 {
			return false
		}
		for _, taskRun := range t.TaskRuns {
			if !taskRun.IsDone() {
				return false
			}
		}
		return t.haveAnyTaskRunsFailed()
	}
}
// isValidationFailed reports whether this task appears in the given list of
// tasks that failed validation (matched by ResolvedTask pointer identity).
func (t ResolvedPipelineTask) isValidationFailed(ftasks []*ResolvedPipelineTask) bool {
	for i := range ftasks {
		if ftasks[i].ResolvedTask == t.ResolvedTask {
			return true
		}
	}
	return false
}
// isCancelledForTimeOut returns true only if every run is done and at least
// one was cancelled specifically because a PipelineRun-controlled timeout was
// reached. With a Matrix this applies across all fanned-out runs.
func (t ResolvedPipelineTask) isCancelledForTimeOut() bool {
	if t.IsCustomTask() {
		if len(t.CustomRuns) == 0 {
			return false
		}
		cancelled := false
		for _, run := range t.CustomRuns {
			// Any still-running sibling means the task as a whole isn't done.
			if !run.IsDone() {
				return false
			}
			c := run.GetStatusCondition().GetCondition(apis.ConditionSucceeded)
			if c.IsFalse() &&
				c.Reason == v1beta1.CustomRunReasonCancelled.String() &&
				isCustomRunCancelledByPipelineRunTimeout(run) {
				cancelled = true
			}
		}
		return cancelled
	}
	if len(t.TaskRuns) == 0 {
		return false
	}
	cancelled := false
	for _, taskRun := range t.TaskRuns {
		if !taskRun.IsDone() {
			return false
		}
		c := taskRun.Status.GetCondition(apis.ConditionSucceeded)
		if c.IsFalse() &&
			c.Reason == v1beta1.TaskRunReasonCancelled.String() &&
			taskRun.Spec.StatusMessage == v1.TaskRunCancelledByPipelineTimeoutMsg {
			cancelled = true
		}
	}
	return cancelled
}
// isCancelled returns true only if every run is done and at least one run was
// cancelled. With a Matrix this applies across all fanned-out runs.
func (t ResolvedPipelineTask) isCancelled() bool {
	if t.IsCustomTask() {
		if len(t.CustomRuns) == 0 {
			return false
		}
		cancelled := false
		for _, run := range t.CustomRuns {
			// Any still-running sibling means the task as a whole isn't done.
			if !run.IsDone() {
				return false
			}
			c := run.GetStatusCondition().GetCondition(apis.ConditionSucceeded)
			if c.IsFalse() && c.Reason == v1beta1.CustomRunReasonCancelled.String() {
				cancelled = true
			}
		}
		return cancelled
	}
	if len(t.TaskRuns) == 0 {
		return false
	}
	cancelled := false
	for _, taskRun := range t.TaskRuns {
		if !taskRun.IsDone() {
			return false
		}
		c := taskRun.Status.GetCondition(apis.ConditionSucceeded)
		if c.IsFalse() && c.Reason == v1beta1.TaskRunReasonCancelled.String() {
			cancelled = true
		}
	}
	return cancelled
}
// isScheduled reports whether at least one TaskRun (or CustomRun, for a custom
// task) has been created for this PipelineTask.
func (t ResolvedPipelineTask) isScheduled() bool {
	runs := len(t.TaskRuns)
	if t.IsCustomTask() {
		runs = len(t.CustomRuns)
	}
	return runs > 0
}
// haveAnyRunsFailed returns true when any of the child PipelineRuns, TaskRuns,
// or CustomRuns owned by this task has its succeeded condition set to false.
func (t ResolvedPipelineTask) haveAnyRunsFailed() bool {
	switch {
	case t.IsChildPipeline():
		return t.haveAnyChildPipelineRunsFailed()
	case t.IsCustomTask():
		return t.haveAnyCustomRunsFailed()
	default:
		return t.haveAnyTaskRunsFailed()
	}
}
// haveAnyChildPipelineRunsFailed reports whether any child PipelineRun has its
// succeeded condition set to false.
func (t ResolvedPipelineTask) haveAnyChildPipelineRunsFailed() bool {
	for i := range t.ChildPipelineRuns {
		if t.ChildPipelineRuns[i].IsFailure() {
			return true
		}
	}
	return false
}
// haveAnyTaskRunsFailed reports whether any TaskRun has its succeeded
// condition set to false.
func (t ResolvedPipelineTask) haveAnyTaskRunsFailed() bool {
	for i := range t.TaskRuns {
		if t.TaskRuns[i].IsFailure() {
			return true
		}
	}
	return false
}
// haveAnyCustomRunsFailed reports whether any CustomRun has its succeeded
// condition set to false.
func (t ResolvedPipelineTask) haveAnyCustomRunsFailed() bool {
	for i := range t.CustomRuns {
		if t.CustomRuns[i].IsFailure() {
			return true
		}
	}
	return false
}
// checkParentsDone reports whether every parent of this task in the pipeline
// DAG is done. Finally tasks are not part of the DAG and always report true.
func (t *ResolvedPipelineTask) checkParentsDone(facts *PipelineRunFacts) bool {
	if facts.isFinalTask(t.PipelineTask.Name) {
		return true
	}
	stateMap := facts.State.ToMap()
	for _, parent := range facts.TasksGraph.Nodes[t.PipelineTask.Name].Prev {
		if !stateMap[parent.Key].isDone(facts) {
			return false
		}
	}
	return true
}
// skip computes (uncached — see Skip for the cached entry point) whether this
// task should be skipped and why. The switch cases are evaluated in order, so
// their ordering defines precedence: already-scheduled/final/validation-failed
// tasks are never skipped; pipeline-level states (stopping, gracefully
// cancelled/stopped) then win over task-level reasons (when expressions,
// skipped parents, missing results, timeouts, empty matrix params).
func (t *ResolvedPipelineTask) skip(facts *PipelineRunFacts) TaskSkipStatus {
	var skippingReason v1.SkippingReason
	switch {
	case facts.isFinalTask(t.PipelineTask.Name) || t.isScheduled() || t.isValidationFailed(facts.ValidationFailedTask):
		skippingReason = v1.None
	case facts.IsStopping():
		skippingReason = v1.StoppingSkip
	case facts.IsGracefullyCancelled():
		skippingReason = v1.GracefullyCancelledSkip
	case facts.IsGracefullyStopped():
		skippingReason = v1.GracefullyStoppedSkip
	case t.skipBecauseWhenExpressionsEvaluatedToFalse(facts):
		skippingReason = v1.WhenExpressionsSkip
	case t.skipBecauseParentTaskWasSkipped(facts):
		skippingReason = v1.ParentTasksSkip
	case t.skipBecauseResultReferencesAreMissing(facts):
		skippingReason = v1.MissingResultsSkip
	case t.skipBecausePipelineRunPipelineTimeoutReached(facts):
		skippingReason = v1.PipelineTimedOutSkip
	case t.skipBecausePipelineRunTasksTimeoutReached(facts):
		skippingReason = v1.TasksTimedOutSkip
	case t.skipBecauseEmptyArrayInMatrixParams():
		skippingReason = v1.EmptyArrayInMatrixParams
	default:
		skippingReason = v1.None
	}
	return TaskSkipStatus{
		IsSkipped:      skippingReason != v1.None,
		SkippingReason: skippingReason,
	}
}
// Skip returns true if a PipelineTask will not be run because
// (1) its When Expressions evaluated to false
// (2) its Condition Checks failed
// (3) its parent task was skipped
// (4) Pipeline is in stopping state (one of the PipelineTasks failed)
// (5) Pipeline is gracefully cancelled or stopped
// Results are memoized per task name in facts.SkipCache.
func (t *ResolvedPipelineTask) Skip(facts *PipelineRunFacts) TaskSkipStatus {
	if facts.SkipCache == nil {
		facts.SkipCache = make(map[string]TaskSkipStatus)
	}
	status, cached := facts.SkipCache[t.PipelineTask.Name]
	if !cached {
		status = t.skip(facts)
		facts.SkipCache[t.PipelineTask.Name] = status
	}
	return status
}
// skipBecauseWhenExpressionsEvaluatedToFalse confirms that the when
// expressions have completed evaluating (all parents done) and returns true if
// any of them forbids execution.
func (t *ResolvedPipelineTask) skipBecauseWhenExpressionsEvaluatedToFalse(facts *PipelineRunFacts) bool {
	if !t.checkParentsDone(facts) {
		return false
	}
	return !t.PipelineTask.When.AllowsExecution(t.EvaluatedCEL)
}
// skipBecauseParentTaskWasSkipped loops through the parent tasks and returns
// true when any parent was skipped for a reason other than its own `when`
// expressions — when-expression skips do not cascade to children, so those
// parents are ignored and the remaining parents keep being evaluated.
func (t *ResolvedPipelineTask) skipBecauseParentTaskWasSkipped(facts *PipelineRunFacts) bool {
	stateMap := facts.State.ToMap()
	for _, p := range facts.TasksGraph.Nodes[t.PipelineTask.Name].Prev {
		parentSkipStatus := stateMap[p.Key].Skip(facts)
		if !parentSkipStatus.IsSkipped {
			continue
		}
		// A parent skipped purely by its when expressions doesn't skip us.
		if parentSkipStatus.SkippingReason == v1.WhenExpressionsSkip {
			continue
		}
		return true
	}
	return false
}
// skipBecauseResultReferencesAreMissing checks if the task references results that cannot be resolved, which is a
// reason for skipping the task, and applies result references if found
func (t *ResolvedPipelineTask) skipBecauseResultReferencesAreMissing(facts *PipelineRunFacts) bool {
	if !t.checkParentsDone(facts) || !t.hasResultReferences() {
		return false
	}
	resolvedResultRefs, pt, err := ResolveResultRefs(facts.State, PipelineRunState{t})
	if referenced := facts.State.ToMap()[pt]; referenced != nil && err != nil {
		// Resolution failed: skip only when the failure is tolerable — the task
		// continues on error, is a finally task, or the producer was when-skipped.
		if t.PipelineTask.OnError == v1.PipelineTaskContinue ||
			t.IsFinalTask(facts) ||
			referenced.Skip(facts).SkippingReason == v1.WhenExpressionsSkip {
			return true
		}
	}
	ApplyTaskResults(PipelineRunState{t}, resolvedResultRefs)
	// Applying results may change downstream skip decisions; invalidate the cache.
	facts.ResetSkippedCache()
	return false
}
// skipBecausePipelineRunPipelineTimeoutReached returns true if the task shouldn't be launched because the elapsed time since
// the PipelineRun started is greater than the PipelineRun's pipeline timeout
func (t *ResolvedPipelineTask) skipBecausePipelineRunPipelineTimeoutReached(facts *PipelineRunFacts) bool {
	if !t.checkParentsDone(facts) {
		return false
	}
	timeout, start := facts.TimeoutsState.PipelineTimeout, facts.TimeoutsState.StartTime
	// A nil or "no timeout" setting, or a missing start time, means no timeout applies.
	if timeout == nil || *timeout == config.NoTimeoutDuration || start == nil {
		return false
	}
	return facts.TimeoutsState.Clock.Since(*start) > *timeout
}
// skipBecausePipelineRunTasksTimeoutReached returns true if the task shouldn't be launched because the elapsed time since
// the PipelineRun started is greater than the PipelineRun's tasks timeout
func (t *ResolvedPipelineTask) skipBecausePipelineRunTasksTimeoutReached(facts *PipelineRunFacts) bool {
	// The tasks timeout applies only to non-finally tasks whose parents are done.
	if !t.checkParentsDone(facts) || t.IsFinalTask(facts) {
		return false
	}
	timeout, start := facts.TimeoutsState.TasksTimeout, facts.TimeoutsState.StartTime
	if timeout == nil || *timeout == config.NoTimeoutDuration || start == nil {
		return false
	}
	return facts.TimeoutsState.Clock.Since(*start) > *timeout
}
// skipBecausePipelineRunFinallyTimeoutReached returns true if the task shouldn't be launched because the elapsed time since
// finally tasks started being executed is greater than the PipelineRun's finally timeout
func (t *ResolvedPipelineTask) skipBecausePipelineRunFinallyTimeoutReached(facts *PipelineRunFacts) bool {
	// The finally timeout applies only to finally tasks whose parents are done.
	if !t.checkParentsDone(facts) || !t.IsFinalTask(facts) {
		return false
	}
	timeout, start := facts.TimeoutsState.FinallyTimeout, facts.TimeoutsState.FinallyStartTime
	if timeout == nil || *timeout == config.NoTimeoutDuration || start == nil {
		return false
	}
	return facts.TimeoutsState.Clock.Since(*start) > *timeout
}
// skipBecauseEmptyArrayInMatrixParams returns true if the matrix parameters contain an empty array
func (t *ResolvedPipelineTask) skipBecauseEmptyArrayInMatrixParams() bool {
	if !t.PipelineTask.IsMatrixed() {
		return false
	}
	// An empty array param yields zero matrix combinations, so the task cannot run.
	for _, p := range t.PipelineTask.Matrix.Params {
		if p.Value.Type == v1.ParamTypeArray && len(p.Value.ArrayVal) == 0 {
			return true
		}
	}
	return false
}
// IsFinalTask returns true if a task is a finally task.
// It delegates to the facts' isFinalTask lookup by pipeline task name.
func (t *ResolvedPipelineTask) IsFinalTask(facts *PipelineRunFacts) bool {
	return facts.isFinalTask(t.PipelineTask.Name)
}
// IsFinallySkipped returns the TaskSkipStatus of a finally task: whether it will not be
// executed (e.g. missing results, false when expressions, timeouts, or an empty matrix
// param array) and the reason for the skip. A scheduled task is never reported as skipped.
func (t *ResolvedPipelineTask) IsFinallySkipped(facts *PipelineRunFacts) TaskSkipStatus {
	reason := v1.None
	// Skip reasons only apply to an unscheduled finally task once every DAG task is done.
	if !t.isScheduled() && facts.checkDAGTasksDone() && facts.isFinalTask(t.PipelineTask.Name) {
		switch {
		case t.skipBecauseResultReferencesAreMissing(facts):
			reason = v1.MissingResultsSkip
		case t.skipBecauseWhenExpressionsEvaluatedToFalse(facts):
			reason = v1.WhenExpressionsSkip
		case t.skipBecausePipelineRunPipelineTimeoutReached(facts):
			reason = v1.PipelineTimedOutSkip
		case t.skipBecausePipelineRunFinallyTimeoutReached(facts):
			reason = v1.FinallyTimedOutSkip
		case t.skipBecauseEmptyArrayInMatrixParams():
			reason = v1.EmptyArrayInMatrixParams
		}
	}
	return TaskSkipStatus{
		IsSkipped:      reason != v1.None,
		SkippingReason: reason,
	}
}
// GetRun is a function that will retrieve a CustomRun by name.
// Callers (e.g. ResolvePipelineTask) treat a Kubernetes NotFound error as
// "not created yet" rather than a failure.
type GetRun func(name string) (*v1beta1.CustomRun, error)
// ValidateWorkspaceBindings validates that the Workspaces expected by a Pipeline are provided by a PipelineRun.
func ValidateWorkspaceBindings(p *v1.PipelineSpec, pr *v1.PipelineRun) error {
	// Index the bindings the PipelineRun supplies, keyed by workspace name.
	provided := make(map[string]v1.WorkspaceBinding, len(pr.Spec.Workspaces))
	for _, binding := range pr.Spec.Workspaces {
		provided[binding.Name] = binding
	}
	for _, ws := range p.Workspaces {
		// Optional workspaces need not be bound.
		if ws.Optional {
			continue
		}
		if _, bound := provided[ws.Name]; !bound {
			return pipelineErrors.WrapUserError(fmt.Errorf("pipeline requires workspace with name %q be provided by pipelinerun", ws.Name))
		}
	}
	return nil
}
// ValidateTaskRunSpecs that the TaskRunSpecs defined by a PipelineRun are correct.
func ValidateTaskRunSpecs(p *v1.PipelineSpec, pr *v1.PipelineRun) error {
	// Collect the names of every task and finally task as a set.
	knownTasks := make(map[string]struct{}, len(p.Tasks)+len(p.Finally))
	for _, task := range p.Tasks {
		knownTasks[task.Name] = struct{}{}
	}
	for _, task := range p.Finally {
		knownTasks[task.Name] = struct{}{}
	}
	// Every taskRunSpec must point at a task that actually exists in the Pipeline.
	for _, trs := range pr.Spec.TaskRunSpecs {
		if _, ok := knownTasks[trs.PipelineTaskName]; !ok {
			return pipelineErrors.WrapUserError(fmt.Errorf("pipelineRun's taskrunSpecs defined wrong taskName: %q, does not exist in Pipeline", trs.PipelineTaskName))
		}
	}
	return nil
}
// ResolvePipelineTask returns a new ResolvedPipelineTask representing any TaskRuns or CustomRuns
// associated with this Pipeline Task, if they exist.
//
// If the Pipeline Task is a Task, it retrieves any TaskRuns, plus the Task spec, and updates the ResolvedPipelineTask
// with this information. It also sets the ResolvedPipelineTask's TaskRunName(s) with the names of TaskRuns
// that should be or already have been created.
//
// If the Pipeline Task is a Custom Task, it retrieves any CustomRuns and updates the ResolvedPipelineTask with this information.
// It also sets the ResolvedPipelineTask's RunName(s) with the names of CustomRuns that should be or already have been created.
//
// If the Pipeline Task is a Pipeline, it retrieves any child PipelineRuns, plus the Pipeline spec and updates the
// ResolvedPipelineTask with this information. It also sets the ResolvedPipelineTask's ChildPipelineRunName(s) with the names
// of child PipelineRuns that should be or already have been created.
//
// NOTE(review): the steps below are order-sensitive — result refs are resolved and applied
// before matrix combinations are counted and before run names are derived.
func ResolvePipelineTask(
	ctx context.Context,
	pipelineRun v1.PipelineRun,
	getChildPipelineRun GetPipelineRun,
	getTask resources.GetTask,
	getTaskRun resources.GetTaskRun,
	getRun GetRun,
	pipelineTask v1.PipelineTask,
	pst PipelineRunState,
) (*ResolvedPipelineTask, error) {
	rpt := ResolvedPipelineTask{
		PipelineTask: &pipelineTask,
	}
	// A task is custom if either its TaskRef or its TaskSpec declares a custom task.
	rpt.CustomTask = rpt.PipelineTask.TaskRef.IsCustomTask() || rpt.PipelineTask.TaskSpec.IsCustomTask()
	numCombinations := 1
	// We want to resolve all of the result references and ignore any errors at this point since there could be
	// instances where result references are missing here, but will be later skipped and resolved in
	// skipBecauseResultReferencesAreMissing. The final validation is handled in CheckMissingResultReferences.
	resolvedResultRefs, _, _ := ResolveResultRefs(pst, PipelineRunState{&rpt})
	if err := validateArrayResultsIndex(resolvedResultRefs); err != nil {
		return nil, err
	}
	ApplyTaskResults(PipelineRunState{&rpt}, resolvedResultRefs)
	// For a matrixed task, one run is created per parameter combination.
	if rpt.PipelineTask.IsMatrixed() {
		numCombinations = rpt.PipelineTask.Matrix.CountCombinations()
	}
	switch {
	case rpt.IsChildPipeline():
		rpt.ChildPipelineRunNames = GetNamesOfChildPipelineRuns(
			pipelineRun.Status.ChildReferences,
			pipelineTask.Name,
			pipelineRun.Name,
			numCombinations,
		)
		// happy path: no pipelineRef, no local/remote resolution, no getPipeline
		for _, childPipelineRunName := range rpt.ChildPipelineRunNames {
			if err := rpt.setChildPipelineRunsAndResolvedPipeline(ctx, childPipelineRunName, getChildPipelineRun, pipelineTask); err != nil {
				return nil, err
			}
		}
	case rpt.IsCustomTask():
		rpt.CustomRunNames = getNamesOfCustomRuns(pipelineRun.Status.ChildReferences, pipelineTask.Name, pipelineRun.Name, numCombinations)
		for _, runName := range rpt.CustomRunNames {
			run, err := getRun(runName)
			// A NotFound error means the CustomRun has not been created yet; not an error.
			if err != nil && !kerrors.IsNotFound(err) {
				return nil, fmt.Errorf("error retrieving CustomRun %s: %w", runName, err)
			}
			if run != nil {
				rpt.CustomRuns = append(rpt.CustomRuns, run)
			}
		}
	default:
		// Ordinary Task-backed pipeline task.
		rpt.TaskRunNames = GetNamesOfTaskRuns(pipelineRun.Status.ChildReferences, pipelineTask.Name, pipelineRun.Name, numCombinations)
		for _, taskRunName := range rpt.TaskRunNames {
			if err := rpt.setTaskRunsAndResolvedTask(ctx, taskRunName, getTask, getTaskRun, pipelineTask); err != nil {
				return nil, err
			}
		}
	}
	return &rpt, nil
}
// setChildPipelineRunsAndResolvedPipeline fetches the named child PipelineRun (if it exists)
// and records the PipelineTask's inline PipelineSpec on the ResolvedPipelineTask.
// PipelineRef resolution is not implemented yet and is reported as an error.
func (t *ResolvedPipelineTask) setChildPipelineRunsAndResolvedPipeline(
	ctx context.Context,
	childPipelineRunName string,
	getChildPipelineRun GetPipelineRun,
	pipelineTask v1.PipelineTask,
) error {
	childPipelineRun, err := getChildPipelineRun(childPipelineRunName)
	// NotFound means the child run has not been created yet; that is expected.
	if err != nil && !kerrors.IsNotFound(err) {
		return fmt.Errorf("error retrieving child PipelineRun %s: %w", childPipelineRunName, err)
	}
	if childPipelineRun != nil {
		t.ChildPipelineRuns = append(t.ChildPipelineRuns, childPipelineRun)
	}
	rp := ResolvedPipeline{}
	if pipelineTask.PipelineSpec != nil {
		rp.PipelineSpec = pipelineTask.PipelineSpec
	} else if pipelineTask.PipelineRef != nil {
		return fmt.Errorf("PipelineRef for PipelineTask %q is not yet implemented", pipelineTask.Name)
	} else {
		return fmt.Errorf("PipelineSpec in PipelineTask %q missing", pipelineTask.Name)
	}
	t.ResolvedPipeline = rp
	return nil
}
// setTaskRunsAndResolvedTask fetches the named TaskRun using the input function getTaskRun,
// and the resolved Task spec of the Pipeline Task using the input function getTask.
// It updates the ResolvedPipelineTask with the ResolvedTask and a pointer to the fetched TaskRun.
func (t *ResolvedPipelineTask) setTaskRunsAndResolvedTask(
	ctx context.Context,
	taskRunName string,
	getTask resources.GetTask,
	getTaskRun resources.GetTaskRun,
	pipelineTask v1.PipelineTask,
) error {
	taskRun, err := getTaskRun(taskRunName)
	// NotFound means the TaskRun has not been created yet; that is expected.
	if err != nil && !kerrors.IsNotFound(err) {
		return fmt.Errorf("error retrieving TaskRun %s: %w", taskRunName, err)
	}
	if taskRun != nil {
		t.TaskRuns = append(t.TaskRuns, taskRun)
	}
	resolved, err := resolveTask(ctx, taskRun, getTask, pipelineTask)
	if err != nil {
		return err
	}
	t.ResolvedTask = resolved
	return nil
}
// resolveTask fetches the Task spec for the PipelineTask and sets its default values.
// It returns a ResolvedTask with the defaulted spec, name, and kind (namespaced Task or Cluster Task) of the Task.
// Returns an error if the Task could not be found because resolution was in progress or any other reason.
func resolveTask(
	ctx context.Context,
	taskRun *v1.TaskRun,
	getTask resources.GetTask,
	pipelineTask v1.PipelineTask,
) (*resources.ResolvedTask, error) {
	rt := &resources.ResolvedTask{}
	switch {
	case pipelineTask.TaskRef != nil:
		// If the TaskRun has already a stored TaskSpec in its status, use it as source of truth
		if taskRun != nil && taskRun.Status.TaskSpec != nil {
			rt.TaskSpec = taskRun.Status.TaskSpec
			rt.TaskName = pipelineTask.TaskRef.Name
		} else {
			// Following minimum status principle (TEP-0100), no need to propagate the RefSource about PipelineTask up to PipelineRun status.
			// Instead, the child TaskRun's status will be the place recording the RefSource of individual task.
			task, _, vr, err := getTask(ctx, pipelineTask.TaskRef.Name)
			// In-progress or transient resolution errors are surfaced as-is so the caller can retry.
			if errors.Is(err, remote.ErrRequestInProgress) || (err != nil && resolutioncommon.IsErrTransient(err)) {
				return rt, err
			}
			if err != nil {
				// some of the resolvers obtain the name from the parameters instead of from the TaskRef.Name field,
				// so we account for both locations when constructing the error
				name := pipelineTask.TaskRef.Name
				if strings.TrimSpace(name) == "" {
					name = resource.GenerateErrorLogString(string(pipelineTask.TaskRef.Resolver), pipelineTask.TaskRef.Params)
				}
				return rt, &TaskNotFoundError{
					Name: name,
					Err:  err,
				}
			}
			// Copy the spec so later defaulting does not mutate the fetched Task.
			spec := task.Spec
			rt.TaskSpec = &spec
			rt.TaskName = task.Name
			rt.VerificationResult = vr
		}
		rt.Kind = pipelineTask.TaskRef.Kind
	case pipelineTask.TaskSpec != nil:
		rt.TaskSpec = &pipelineTask.TaskSpec.TaskSpec
	default:
		// If the alpha feature is enabled, and the user has configured pipelineSpec or pipelineRef, it will enter here.
		// Currently, the controller is not yet adapted, and to avoid a panic, an error message is provided here.
		// TODO: Adjust the logic here once the feature is supported in the future.
		return nil, fmt.Errorf("currently, Task %q does not support PipelineRef, please use PipelineSpec, TaskRef or TaskSpec instead", pipelineTask.Name)
	}
	rt.TaskSpec.SetDefaults(ctx)
	return rt, nil
}
// GetTaskRunName should return a unique name for a `TaskRun` if one has not already been defined, and the existing one otherwise.
func GetTaskRunName(childRefs []v1.ChildStatusReference, ptName, prName string) string {
	// Reuse the name recorded in the PipelineRun's child references, if any.
	for _, ref := range childRefs {
		if ref.Kind == pipeline.TaskRunControllerName && ref.PipelineTaskName == ptName {
			return ref.Name
		}
	}
	// Otherwise derive a fresh, length-safe child name.
	return kmeta.ChildName(prName, "-"+ptName)
}
// GetNamesOfTaskRuns should return unique names for `TaskRuns` if one has not already been defined, and the existing one otherwise.
func GetNamesOfTaskRuns(childRefs []v1.ChildStatusReference, ptName, prName string, numberOfTaskRuns int) []string {
	// Prefer names already recorded in the child references.
	if existing := getTaskRunNamesFromChildRefs(childRefs, ptName); len(existing) > 0 {
		return existing
	}
	return getNewRunNames(ptName, prName, numberOfTaskRuns)
}
// GetNamesOfChildPipelineRuns should return unique names for child PipelineRuns if one has not already been
// defined, and the existing one otherwise.
func GetNamesOfChildPipelineRuns(childRefs []v1.ChildStatusReference, ptName, prName string, numberOfPipelineRuns int) []string {
	// Prefer names already recorded in the child references.
	if existing := getChildPipelineRunNamesFromChildRefs(childRefs, ptName); len(existing) > 0 {
		return existing
	}
	return getNewRunNames(ptName, prName, numberOfPipelineRuns)
}
// getTaskRunNamesFromChildRefs returns the names of TaskRuns defined in childRefs that are associated with the named Pipeline Task.
func getTaskRunNamesFromChildRefs(childRefs []v1.ChildStatusReference, ptName string) []string {
	var names []string
	for _, ref := range childRefs {
		if ref.PipelineTaskName == ptName && ref.Kind == pipeline.TaskRunControllerName {
			names = append(names, ref.Name)
		}
	}
	return names
}
// getNewRunNames returns the names of the PipelineRuns/TaskRuns/CustomRuns that should be
// created for the given PipelineTask: a single child name for a singular task, or one name
// per matrix combination with a "-<i>" index suffix for a fanned-out matrix.
func getNewRunNames(ptName, prName string, numberOfRuns int) []string {
	// If it is a singular PipelineRun/TaskRun/CustomRun, we only append the ptName
	if numberOfRuns == 1 {
		return []string{kmeta.ChildName(prName, "-"+ptName)}
	}
	runNames := make([]string, 0, numberOfRuns)
	// For a matrix we append i to the end of the fanned out PipelineRun/TaskRun/CustomRun "matrixed-pr-taskrun-0"
	for i := range numberOfRuns {
		runName := kmeta.ChildName(prName, fmt.Sprintf("-%s-%d", ptName, i))
		// check if the PipelineRun/TaskRun/CustomRun name ends with a matrix instance count;
		// kmeta.ChildName may have truncated/hashed the name and dropped the suffix
		if !strings.HasSuffix(runName, fmt.Sprintf("-%d", i)) {
			runName = kmeta.ChildName(prName, "-"+ptName)
			// kmeta.ChildName limits the size of a name to max of 63 characters based on k8s guidelines
			// truncate the name such that "-<matrix-id>" can be appended to the PipelineRun/TaskRun/CustomRun name
			longest := 63 - len(fmt.Sprintf("-%d", numberOfRuns))
			// Guard against slicing beyond the name's length (the previous unconditional
			// runName[0:longest] would panic for names shorter than `longest`).
			if len(runName) > longest {
				runName = runName[:longest]
			}
			runName = fmt.Sprintf("%s-%d", runName, i)
		}
		runNames = append(runNames, runName)
	}
	return runNames
}
// getCustomRunName should return a unique name for a `Run` if one has not already
// been defined, and the existing one otherwise.
func getCustomRunName(childRefs []v1.ChildStatusReference, ptName, prName string) string {
	// Reuse the CustomRun name recorded in the child references, if present.
	for _, ref := range childRefs {
		if ref.PipelineTaskName == ptName && ref.Kind == pipeline.CustomRunControllerName {
			return ref.Name
		}
	}
	return kmeta.ChildName(prName, "-"+ptName)
}
// getNamesOfCustomRuns should return a unique names for `CustomRuns` if they have not already been defined,
// and the existing ones otherwise.
func getNamesOfCustomRuns(childRefs []v1.ChildStatusReference, ptName, prName string, numberOfRuns int) []string {
	// Prefer names already recorded in the child references.
	if existing := getRunNamesFromChildRefs(childRefs, ptName); len(existing) > 0 {
		return existing
	}
	return getNewRunNames(ptName, prName, numberOfRuns)
}
// getRunNamesFromChildRefs returns the names of CustomRuns defined in childRefs that are associated with the named Pipeline Task.
func getRunNamesFromChildRefs(childRefs []v1.ChildStatusReference, ptName string) []string {
	var names []string
	for _, ref := range childRefs {
		if ref.PipelineTaskName == ptName && ref.Kind == pipeline.CustomRunControllerName {
			names = append(names, ref.Name)
		}
	}
	return names
}
// getChildPipelineRunNamesFromChildRefs returns the names of child PipelineRuns defined in childRefs that are
// associated with the named Pipeline Task.
func getChildPipelineRunNamesFromChildRefs(childRefs []v1.ChildStatusReference, ptName string) []string {
	var names []string
	for _, ref := range childRefs {
		if ref.PipelineTaskName == ptName && ref.Kind == pipeline.PipelineRunControllerName {
			names = append(names, ref.Name)
		}
	}
	return names
}
// hasResultReferences returns true if the PipelineTask references a result in its
// regular params, its matrix params, or its when expressions.
func (t *ResolvedPipelineTask) hasResultReferences() bool {
	var matrixParams v1.Params
	if t.PipelineTask.IsMatrixed() {
		// Inspect the matrix's own params. The previous code re-read
		// t.PipelineTask.Params here, which merely duplicated the regular
		// params and never examined the matrix params at all.
		matrixParams = t.PipelineTask.Matrix.Params
	}
	// Build a fresh slice rather than appending onto t.PipelineTask.Params,
	// so the task's backing array cannot be mutated by the append.
	refParams := make(v1.Params, 0, len(t.PipelineTask.Params)+len(matrixParams))
	refParams = append(refParams, t.PipelineTask.Params...)
	refParams = append(refParams, matrixParams...)
	for _, param := range refParams {
		if ps, ok := param.GetVarSubstitutionExpressions(); ok && v1.LooksLikeContainsResultRefs(ps) {
			return true
		}
	}
	for _, we := range t.PipelineTask.When {
		if ps, ok := we.GetVarSubstitutionExpressions(); ok && v1.LooksLikeContainsResultRefs(ps) {
			return true
		}
	}
	return false
}
// isCustomRunCancelledByPipelineRunTimeout returns true if the CustomRun's recorded status
// message indicates it was cancelled because the owning PipelineRun timed out.
func isCustomRunCancelledByPipelineRunTimeout(cr *v1beta1.CustomRun) bool {
	return cr.Spec.StatusMessage == v1beta1.CustomRunCancelledByPipelineTimeoutMsg
}
// CheckMissingResultReferences returns an error if it is missing any result references.
// Missing result references can occur if task fails to produce a result but has
// OnError: continue (ie TestMissingResultWhenStepErrorIsIgnored)
func CheckMissingResultReferences(pipelineRunState PipelineRunState, target *ResolvedPipelineTask) error {
	// Build the name->task map once; the previous code rebuilt it on every
	// iteration, making the loop quadratic in the number of tasks.
	stateMap := pipelineRunState.ToMap()
	for _, resultRef := range v1.PipelineTaskResultRefs(target.PipelineTask) {
		referencedPipelineTask, ok := stateMap[resultRef.PipelineTask]
		if !ok {
			return fmt.Errorf("Result reference error: Could not find ref \"%s\" in internal pipelineRunState", resultRef.PipelineTask)
		}
		if referencedPipelineTask.IsCustomTask() {
			if len(referencedPipelineTask.CustomRuns) == 0 {
				return fmt.Errorf("Result reference error: Internal result ref \"%s\" has zero-length CustomRuns", resultRef.PipelineTask)
			}
			// Only the first CustomRun is checked for the referenced result.
			if _, err := findRunResultForParam(referencedPipelineTask.CustomRuns[0], resultRef); err != nil {
				return err
			}
		} else {
			if len(referencedPipelineTask.TaskRuns) == 0 {
				return fmt.Errorf("Result reference error: Internal result ref \"%s\" has zero-length TaskRuns", resultRef.PipelineTask)
			}
			// Only the first TaskRun is checked for the referenced result.
			if _, err := findTaskResultForParam(referencedPipelineTask.TaskRuns[0], resultRef); err != nil {
				return err
			}
		}
	}
	return nil
}
// createResultsCacheMatrixedTaskRuns creates a cache of results that have been fanned out from a
// referenced matrixed PipelineTask so that you can easily access these results in subsequent Pipeline Tasks.
// Note: it sorts rpt.TaskRuns in place by name so the cached result order is deterministic.
func createResultsCacheMatrixedTaskRuns(rpt *ResolvedPipelineTask) map[string][]string {
	// Always allocate the cache. The previous version only allocated it when
	// rpt.ResultsCache was empty, leaving a nil map that would panic on the
	// writes below whenever the cache was already populated.
	resultsCache := make(map[string][]string)
	// Sort the taskRuns by name to ensure the order is deterministic
	sort.Slice(rpt.TaskRuns, func(i, j int) bool {
		return rpt.TaskRuns[i].Name < rpt.TaskRuns[j].Name
	})
	for _, taskRun := range rpt.TaskRuns {
		for _, result := range taskRun.Status.Results {
			resultsCache[result.Name] = append(resultsCache[result.Name], result.Value.StringVal)
		}
	}
	return resultsCache
}
// ValidateParamEnumSubset finds the referenced pipeline-level params in the resolved pipelineTask.
// It then validates if the referenced pipeline-level param enums are subsets of the resolved pipelineTask-level param enums
func ValidateParamEnumSubset(pipelineTaskParams []v1.Param, pipelineParamSpecs []v1.ParamSpec, rt *resources.ResolvedTask) error {
	// When the matrix Task has no TaskRun, the rt will be nil, we should skip the validation.
	if rt == nil {
		return nil
	}
	for _, p := range pipelineTaskParams {
		// calculate referenced param enums
		res, present, errString := substitution.ExtractVariablesFromString(p.Value.StringVal, "params")
		if errString != "" {
			return fmt.Errorf("unexpected error in ExtractVariablesFromString: %s", errString)
		}
		// if multiple params are extracted, that means the task-level param is a compounded value, skip subset validation
		if !present || len(res) > 1 {
			continue
		}
		// resolve pipeline-level and pipelineTask-level enums
		paramName := substitution.TrimArrayIndex(res[0])
		pipelineParam := getParamFromName(paramName, pipelineParamSpecs)
		resolvedTaskParam := getParamFromName(p.Name, rt.TaskSpec.Params)
		// param enum is only supported for string param type,
		// we only validate the enum subset requirement for string typed param.
		// If there is no task-level enum (allowing any value), any pipeline-level enum is allowed.
		// Skip just this param (the previous `return nil` here aborted validation
		// of all remaining params as soon as one was exempt).
		if pipelineParam.Type != v1.ParamTypeString || len(resolvedTaskParam.Enum) == 0 {
			continue
		}
		// if pipeline-level enum is empty (allowing any value) but task-level enum is not, it is not a "subset"
		if len(pipelineParam.Enum) == 0 && len(resolvedTaskParam.Enum) > 0 {
			return fmt.Errorf("pipeline param \"%s\" has no enum, but referenced in \"%s\" task has enums: %v", pipelineParam.Name, rt.TaskName, resolvedTaskParam.Enum)
		}
		// validate if pipeline-level enum is a subset of pipelineTask-level enum
		if isValid := isSubset(pipelineParam.Enum, resolvedTaskParam.Enum); !isValid {
			return fmt.Errorf("pipeline param \"%s\" enum: %v is not a subset of the referenced in \"%s\" task param enum: %v", pipelineParam.Name, pipelineParam.Enum, rt.TaskName, resolvedTaskParam.Enum)
		}
	}
	return nil
}
// isSubset reports whether every value in pipelineEnum also appears in taskEnum.
// An empty pipelineEnum is trivially a subset.
func isSubset(pipelineEnum, taskEnum []string) bool {
	// Single membership set; the previous version also built a map of
	// pipelineEnum purely to deduplicate it, which does not affect the result,
	// and used a non-idiomatic exported-style local name (TaskEnumMap).
	allowed := make(map[string]bool, len(taskEnum))
	for _, e := range taskEnum {
		allowed[e] = true
	}
	for _, e := range pipelineEnum {
		if !allowed[e] {
			return false
		}
	}
	return true
}
// getParamFromName returns the ParamSpec with the given name, or a zero-valued
// ParamSpec when no spec with that name exists.
func getParamFromName(name string, pss v1.ParamSpecs) v1.ParamSpec {
	for i := range pss {
		if pss[i].Name == name {
			return pss[i]
		}
	}
	return v1.ParamSpec{}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
	"context"
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
	"github.com/tektoncd/pipeline/pkg/substitution"
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/utils/clock"
	"knative.dev/pkg/apis"
)
const (
	// PipelineTaskStateNone indicates that the execution status of a pipelineTask is unknown
	PipelineTaskStateNone = "None"
	// PipelineTaskStatusPrefix is a prefix of the param representing execution state of pipelineTask
	PipelineTaskStatusPrefix = "tasks."
	// PipelineTaskStatusSuffix is a suffix of the param representing execution state of pipelineTask
	PipelineTaskStatusSuffix = ".status"
	// PipelineTaskReasonSuffix is a suffix of the param representing the reason of a pipelineTask
	PipelineTaskReasonSuffix = ".reason"
)
// PipelineRunState is a slice of ResolvedPipelineTasks that represents the current execution
// state of the PipelineRun.
type PipelineRunState []*ResolvedPipelineTask
// PipelineRunFacts holds the state of all the components that make up the Pipeline graph that are used to track the
// PipelineRun state without passing all these components separately. It helps simplify our implementation for getting
// and scheduling the next tasks. It is a collection of list of ResolvedPipelineTask, graph of DAG tasks, graph of
// finally tasks, cache of skipped tasks.
type PipelineRunFacts struct {
	// State is the resolved execution state of every PipelineTask in the run.
	State PipelineRunState
	// SpecStatus is the PipelineRun's spec status (v1.PipelineRunSpecStatus).
	SpecStatus v1.PipelineRunSpecStatus
	// TasksGraph is the DAG of the ordinary (non-finally) tasks.
	TasksGraph *dag.Graph
	// FinalTasksGraph is the graph of the finally tasks.
	FinalTasksGraph *dag.Graph
	// TimeoutsState carries start times and timeout settings used for timeout-based skipping.
	TimeoutsState PipelineRunTimeoutsState
	// SkipCache is a hash of PipelineTask names that stores whether a task will be
	// executed or not, because it's either not reachable via the DAG due to the pipeline
	// state, or because it was skipped due to when expressions.
	// We cache this data along the state, because it's expensive to compute, it requires
	// traversing potentially the whole graph; this way it can be built incrementally, when
	// needed, via the `Skip` method in pipelinerunresolution.go
	// The skip data is sensitive to changes in the state. The ResetSkippedCache method
	// can be used to clean the cache and force re-computation when needed.
	SkipCache map[string]TaskSkipStatus
	// ValidationFailedTask are the tasks for which taskrun is not created as they
	// never got added to the execution i.e. they failed in the validation step. One of
	// the case of failing at the validation is during CheckMissingResultReferences method.
	// Tasks in ValidationFailedTask are added in method runNextSchedulableTask.
	ValidationFailedTask []*ResolvedPipelineTask
}
// PipelineRunTimeoutsState records information about start times and timeouts for the PipelineRun, so that the PipelineRunFacts
// can reference those values in its functions.
type PipelineRunTimeoutsState struct {
	// StartTime is when the PipelineRun started.
	StartTime *time.Time
	// FinallyStartTime is when the finally tasks started executing, if they have.
	FinallyStartTime *time.Time
	// PipelineTimeout bounds the whole PipelineRun; nil or config.NoTimeoutDuration disables it.
	PipelineTimeout *time.Duration
	// TasksTimeout bounds the non-finally (DAG) tasks.
	TasksTimeout *time.Duration
	// FinallyTimeout bounds the finally tasks, measured from FinallyStartTime.
	FinallyTimeout *time.Duration
	// Clock supplies elapsed-time measurements (PassiveClock allows substitution).
	Clock clock.PassiveClock
}
// pipelineRunStatusCount holds the count of successful, failed, cancelled, skipped, and incomplete tasks.
// It is used to aggregate per-task outcomes into an overall PipelineRun status.
type pipelineRunStatusCount struct {
	// skipped tasks count
	Skipped int
	// successful tasks count
	Succeeded int
	// failed tasks count
	Failed int
	// failed but ignored tasks count
	IgnoredFailed int
	// cancelled tasks count
	Cancelled int
	// number of tasks which are still pending, have not executed
	Incomplete int
	// count of tasks skipped due to the relevant timeout having elapsed before the task is launched
	SkippedDueToTimeout int
	// count of validation failed task and taskrun not created
	ValidationFailed int
}
// ResetSkippedCache resets the skipped cache in the facts map
func (facts *PipelineRunFacts) ResetSkippedCache() {
	// Drop all memoized skip decisions; they are recomputed lazily via Skip.
	facts.SkipCache = map[string]TaskSkipStatus{}
}
// ToMap returns a map that maps pipeline task name to the resolved pipeline run task
func (state PipelineRunState) ToMap() map[string]*ResolvedPipelineTask {
	byName := make(map[string]*ResolvedPipelineTask, len(state))
	for _, rpt := range state {
		byName[rpt.PipelineTask.Name] = rpt
	}
	return byName
}
// IsBeforeFirstTaskRun returns true if the PipelineRun has not yet started its first child PipelineRun/TaskRun/CustomRun
func (state PipelineRunState) IsBeforeFirstTaskRun() bool {
	for _, rpt := range state {
		// Any child run of any flavor means execution has begun.
		started := len(rpt.ChildPipelineRuns) > 0 || len(rpt.CustomRuns) > 0 || len(rpt.TaskRuns) > 0
		if started {
			return false
		}
	}
	return true
}
// AdjustStartTime adjusts potential drift in the PipelineRun's start time.
//
// The StartTime will only adjust earlier, so that the PipelineRun's StartTime
// is no later than any of its constituent TaskRuns.
//
// This drift could be due to us either failing to record the Run's start time
// previously, or our own failure to observe a prior update before reconciling
// the resource again.
func (state PipelineRunState) AdjustStartTime(unadjustedStartTime *metav1.Time) *metav1.Time {
	earliest := unadjustedStartTime
	for _, rpt := range state {
		for _, childPipelineRun := range rpt.ChildPipelineRuns {
			if childPipelineRun.CreationTimestamp.Time.Before(earliest.Time) {
				earliest = &childPipelineRun.CreationTimestamp
			}
		}
		for _, customRun := range rpt.CustomRuns {
			created := customRun.GetObjectMeta().GetCreationTimestamp()
			if created.Time.Before(earliest.Time) {
				earliest = &created
			}
		}
		for _, taskRun := range rpt.TaskRuns {
			if taskRun.CreationTimestamp.Time.Before(earliest.Time) {
				earliest = &taskRun.CreationTimestamp
			}
		}
	}
	// Return a copy so callers cannot mutate a timestamp owned by a run object.
	return earliest.DeepCopy()
}
// GetTaskRunsResults returns a map of all successfully completed TaskRuns in the state, with the pipeline task name as
// the key and the results from the corresponding TaskRun as the value. It only includes tasks which have completed successfully.
func (state PipelineRunState) GetTaskRunsResults() map[string][]v1.TaskRunResult {
	results := make(map[string][]v1.TaskRunResult)
	for _, rpt := range state {
		// Only ordinary, successfully completed Task-backed tasks contribute results.
		if rpt.IsChildPipeline() || rpt.IsCustomTask() || !rpt.isSuccessful() {
			continue
		}
		if !rpt.PipelineTask.IsMatrixed() {
			results[rpt.PipelineTask.Name] = rpt.TaskRuns[0].Status.Results
			continue
		}
		// Matrixed tasks publish their fanned-out results via the results cache.
		if taskRunResults := ConvertResultsMapToTaskRunResults(rpt.ResultsCache); len(taskRunResults) > 0 {
			results[rpt.PipelineTask.Name] = taskRunResults
		}
	}
	return results
}
// GetTaskRunsArtifacts returns a map of all successfully completed TaskRuns in the state, with the pipeline task name as
// the key and the artifacts from the corresponding TaskRun as the value. It only includes tasks which have completed successfully.
func (state PipelineRunState) GetTaskRunsArtifacts() map[string]*v1.Artifacts {
	results := make(map[string]*v1.Artifacts)
	for _, rpt := range state {
		// Only ordinary, successfully completed Task-backed tasks contribute artifacts.
		if rpt.IsChildPipeline() || rpt.IsCustomTask() || !rpt.isSuccessful() {
			continue
		}
		if !rpt.PipelineTask.IsMatrixed() {
			results[rpt.PipelineTask.Name] = rpt.TaskRuns[0].Status.Artifacts
			continue
		}
		// Merge the artifacts of every fanned-out TaskRun of a matrixed task.
		var merged v1.Artifacts
		for _, tr := range rpt.TaskRuns {
			merged.Merge(tr.Status.Artifacts)
		}
		results[rpt.PipelineTask.Name] = &merged
	}
	return results
}
// ConvertResultsMapToTaskRunResults converts the map of results from Matrixed PipelineTasks to a list
// of TaskRunResults to standard the format.
// Result names are emitted in sorted order: Go map iteration order is random, and a
// nondeterministic list would churn the status on every reconcile (the sibling
// createResultsCacheMatrixedTaskRuns sorts for determinism for the same reason).
func ConvertResultsMapToTaskRunResults(resultsMap map[string][]string) []v1.TaskRunResult {
	names := make([]string, 0, len(resultsMap))
	for name := range resultsMap {
		names = append(names, name)
	}
	sort.Strings(names)
	var taskRunResults []v1.TaskRunResult
	for _, name := range names {
		taskRunResults = append(taskRunResults, v1.TaskRunResult{
			Name: name,
			Type: v1.ResultsTypeArray,
			Value: v1.ParamValue{
				Type:     v1.ParamTypeArray,
				ArrayVal: resultsMap[name],
			},
		})
	}
	return taskRunResults
}
// GetRunsResults returns a map of all successfully completed Runs in the state, with the pipeline task name as the key
// and the results from the corresponding TaskRun as the value. It only includes runs which have completed successfully.
func (state PipelineRunState) GetRunsResults() map[string][]v1beta1.CustomRunResult {
	results := make(map[string][]v1beta1.CustomRunResult)
	for _, rpt := range state {
		if !rpt.IsCustomTask() || !rpt.isSuccessful() {
			continue
		}
		// Currently a Matrix cannot produce results so this is for a singular CustomRun
		if len(rpt.CustomRuns) != 1 {
			continue
		}
		results[rpt.PipelineTask.Name] = rpt.CustomRuns[0].Status.Results
	}
	return results
}
// GetChildReferences returns a reference (API version, kind, name, and pipeline task
// name) for every child PipelineRun, TaskRun, and CustomRun in the state.
func (facts *PipelineRunFacts) GetChildReferences() []v1.ChildStatusReference {
	var refs []v1.ChildStatusReference
	for _, task := range facts.State {
		// For tasks that have finished, try to substitute result references used by
		// when expressions so the recorded child reference carries resolved values.
		// Resolution errors are deliberately ignored here.
		if task.isDone(facts) {
			if resolved, _, err := ResolveResultRefs(facts.State, PipelineRunState{task}); err == nil {
				ApplyTaskResults(facts.State, resolved)
			}
		}
		switch {
		case len(task.ChildPipelineRuns) != 0:
			for _, childPR := range task.ChildPipelineRuns {
				if childPR != nil {
					refs = append(refs, task.getChildRefForChildPipelineRun(childPR))
				}
			}
		case len(task.TaskRuns) != 0:
			for _, tr := range task.TaskRuns {
				if tr != nil {
					refs = append(refs, task.getChildRefForTaskRun(tr))
				}
			}
		case len(task.CustomRuns) != 0:
			for _, run := range task.CustomRuns {
				refs = append(refs, task.getChildRefForRun(run))
			}
		}
	}
	return refs
}
// getDisplayName computes the human-readable display name recorded on the given
// ChildStatusReference. String-typed parameter values from whichever child object is
// non-nil (pipelineRun, taskRun, or customRun) are substituted into the PipelineTask's
// displayName template; for matrixed tasks, the names of matching matrix include
// entries override that. Presumably exactly one of the three run arguments is non-nil
// per call (each caller passes a single one) — TODO confirm.
func (t *ResolvedPipelineTask) getDisplayName(pipelineRun *v1.PipelineRun, customRun *v1beta1.CustomRun, taskRun *v1.TaskRun, c v1.ChildStatusReference) v1.ChildStatusReference {
// Build a "params.<name>" -> value map; only string-typed params participate.
replacements := make(map[string]string)
if pipelineRun != nil {
for _, p := range pipelineRun.Spec.Params {
if p.Value.Type == v1.ParamTypeString {
replacements[fmt.Sprintf("%s.%s", v1.ParamsPrefix, p.Name)] = p.Value.StringVal
}
}
}
if taskRun != nil {
for _, p := range taskRun.Spec.Params {
if p.Value.Type == v1.ParamTypeString {
replacements[fmt.Sprintf("%s.%s", v1.ParamsPrefix, p.Name)] = p.Value.StringVal
}
}
}
if customRun != nil {
for _, p := range customRun.Spec.Params {
if p.Value.Type == v1beta1.ParamTypeString {
replacements[fmt.Sprintf("%s.%s", v1.ParamsPrefix, p.Name)] = p.Value.StringVal
}
}
}
// Substitute params into the task's explicit display name, if one was set.
if t.PipelineTask.DisplayName != "" {
c.DisplayName = substitution.ApplyReplacements(t.PipelineTask.DisplayName, replacements)
}
if t.PipelineTask.Matrix != nil {
// For a matrixed task, collect the names of the named include combinations
// whose string params all match the child run's params.
var dn string
for _, i := range t.PipelineTask.Matrix.Include {
if i.Name == "" {
continue
}
match := true
for _, ip := range i.Params {
v, ok := replacements[fmt.Sprintf("%s.%s", v1.ParamsPrefix, ip.Name)]
// A missing param, or a string param with a different value, disqualifies
// this include entry. Non-string params are not compared.
if !ok || (ip.Value.Type == v1.ParamTypeString && ip.Value.StringVal != v) {
match = false
break
}
}
if match {
dn = fmt.Sprintf("%s %s", dn, substitution.ApplyReplacements(i.Name, replacements))
}
}
// Matching include names (if any) take precedence over the displayName set above.
if dn != "" {
c.DisplayName = strings.TrimSpace(dn)
}
}
return c
}
// getChildRefForChildPipelineRun builds the ChildStatusReference for a child
// PipelineRun created for this pipeline task, including its resolved display name.
func (t *ResolvedPipelineTask) getChildRefForChildPipelineRun(pipelineRun *v1.PipelineRun) v1.ChildStatusReference {
	var ref v1.ChildStatusReference
	ref.TypeMeta = runtime.TypeMeta{
		APIVersion: v1.SchemeGroupVersion.String(),
		Kind:       pipeline.PipelineRunControllerName,
	}
	ref.Name = pipelineRun.Name
	ref.PipelineTaskName = t.PipelineTask.Name
	ref.WhenExpressions = t.PipelineTask.When
	return t.getDisplayName(pipelineRun, nil, nil, ref)
}
// getChildRefForRun builds the ChildStatusReference for a CustomRun created for
// this pipeline task, including its resolved display name.
func (t *ResolvedPipelineTask) getChildRefForRun(customRun *v1beta1.CustomRun) v1.ChildStatusReference {
	var ref v1.ChildStatusReference
	ref.TypeMeta = runtime.TypeMeta{
		APIVersion: v1beta1.SchemeGroupVersion.String(),
		Kind:       pipeline.CustomRunControllerName,
	}
	ref.Name = customRun.GetObjectMeta().GetName()
	ref.PipelineTaskName = t.PipelineTask.Name
	ref.WhenExpressions = t.PipelineTask.When
	return t.getDisplayName(nil, customRun, nil, ref)
}
// getChildRefForTaskRun builds the ChildStatusReference for a TaskRun created for
// this pipeline task, including its resolved display name.
func (t *ResolvedPipelineTask) getChildRefForTaskRun(taskRun *v1.TaskRun) v1.ChildStatusReference {
	var ref v1.ChildStatusReference
	ref.TypeMeta = runtime.TypeMeta{
		APIVersion: v1.SchemeGroupVersion.String(),
		Kind:       pipeline.TaskRunControllerName,
	}
	ref.Name = taskRun.Name
	ref.PipelineTaskName = t.PipelineTask.Name
	ref.WhenExpressions = t.PipelineTask.When
	return t.getDisplayName(nil, nil, taskRun, ref)
}
// getNextTasks returns a list of pipeline tasks which should be executed next i.e.
// a list of tasks from candidateTasks which aren't yet indicated in state to be running and
// a list of cancelled/failed tasks from candidateTasks which haven't exhausted their retries
func (state PipelineRunState) getNextTasks(candidateTasks sets.String) []*ResolvedPipelineTask {
	tasks := []*ResolvedPipelineTask{}
	for _, t := range state {
		// Use the set's Has method instead of a raw map index; a candidate is
		// schedulable only if nothing has been created for it yet (no TaskRuns,
		// CustomRuns, or child PipelineRuns).
		if candidateTasks.Has(t.PipelineTask.Name) &&
			len(t.TaskRuns) == 0 && len(t.CustomRuns) == 0 && len(t.ChildPipelineRuns) == 0 {
			tasks = append(tasks, t)
		}
	}
	return tasks
}
// IsStopping returns true if the PipelineRun won't be scheduling any new Task because
// at least one DAG task already failed (with onError: stopAndFail) or was cancelled.
func (facts *PipelineRunFacts) IsStopping() bool {
	for _, t := range facts.State {
		if !facts.isDAGTask(t.PipelineTask.Name) {
			continue
		}
		stopCandidate := t.isFailure() || t.isValidationFailed(facts.ValidationFailedTask)
		if stopCandidate && t.PipelineTask.OnError != v1.PipelineTaskContinue {
			return true
		}
	}
	return false
}
// IsRunning returns true if the PipelineRun is still running tasks in the specified dag.
func (facts *PipelineRunFacts) IsRunning() bool {
	for _, t := range facts.State {
		if facts.isDAGTask(t.PipelineTask.Name) && t.IsRunning() {
			return true
		}
	}
	return false
}
// IsCancelled returns true if the PipelineRun was cancelled
// (its spec.status was set to Cancelled).
func (facts *PipelineRunFacts) IsCancelled() bool {
return facts.SpecStatus == v1.PipelineRunSpecStatusCancelled
}
// IsGracefullyCancelled returns true if the PipelineRun was gracefully cancelled
// (its spec.status was set to CancelledRunFinally).
func (facts *PipelineRunFacts) IsGracefullyCancelled() bool {
return facts.SpecStatus == v1.PipelineRunSpecStatusCancelledRunFinally
}
// IsGracefullyStopped returns true if the PipelineRun was gracefully stopped
// (its spec.status was set to StoppedRunFinally).
func (facts *PipelineRunFacts) IsGracefullyStopped() bool {
return facts.SpecStatus == v1.PipelineRunSpecStatusStoppedRunFinally
}
// DAGExecutionQueue returns the list of DAG tasks which should be scheduled next.
func (facts *PipelineRunFacts) DAGExecutionQueue() (PipelineRunState, error) {
	// When the pipelinerun is cancelled or gracefully cancelled, schedule nothing
	// new and only wait for running tasks to complete (without exhausting retries).
	if facts.IsCancelled() || facts.IsGracefullyCancelled() {
		return nil, nil
	}
	// Candidates start at the DAG root nodes and advance as tasks finish
	// successfully or are skipped.
	candidateTasks, err := dag.GetCandidateTasks(facts.TasksGraph, facts.completedOrSkippedDAGTasks()...)
	if err != nil {
		return nil, err
	}
	// While stopping (or gracefully stopped) no new DAG tasks are scheduled.
	if facts.IsStopping() || facts.IsGracefullyStopped() {
		return nil, nil
	}
	return facts.State.getNextTasks(candidateTasks), nil
}
// GetFinalTaskNames returns the names of all tasks in the finally section.
func (facts *PipelineRunFacts) GetFinalTaskNames() sets.String {
	finalNames := sets.NewString()
	for _, rpt := range facts.State {
		name := rpt.PipelineTask.Name
		if facts.isFinalTask(name) {
			finalNames.Insert(name)
		}
	}
	return finalNames
}
// GetTaskNames returns the names of all non-final (DAG) tasks.
func (facts *PipelineRunFacts) GetTaskNames() sets.String {
	dagNames := sets.NewString()
	for _, rpt := range facts.State {
		name := rpt.PipelineTask.Name
		if facts.isFinalTask(name) {
			continue
		}
		dagNames.Insert(name)
	}
	return dagNames
}
// GetFinalTasks returns the finally tasks which should be executed next. Finally
// tasks are only released once every DAG task has finished executing, where
// "finished" means succeeded, failed, or skipped.
func (facts *PipelineRunFacts) GetFinalTasks() PipelineRunState {
	if !facts.checkDAGTasksDone() {
		return PipelineRunState{}
	}
	finalCandidates := sets.NewString()
	for _, rpt := range facts.State {
		if facts.isFinalTask(rpt.PipelineTask.Name) {
			finalCandidates.Insert(rpt.PipelineTask.Name)
		}
	}
	return facts.State.getNextTasks(finalCandidates)
}
// IsFinalTaskStarted returns true once all DAG pipelineTasks have finished
// (succeeded, failed, or skipped) and at least one finally task has been scheduled.
func (facts *PipelineRunFacts) IsFinalTaskStarted() bool {
	if !facts.checkDAGTasksDone() {
		return false
	}
	for _, rpt := range facts.State {
		if facts.isFinalTask(rpt.PipelineTask.Name) && rpt.isScheduled() {
			return true
		}
	}
	return false
}
// GetPipelineConditionStatus will return the Condition that the PipelineRun prName should be
// updated with, based on the status of the child PipelineRuns/TaskRuns/CustomRuns in state.
func (facts *PipelineRunFacts) GetPipelineConditionStatus(ctx context.Context, pr *v1.PipelineRun, logger *zap.SugaredLogger, c clock.PassiveClock) *apis.Condition {
// We have 4 different states here:
// 1. Timed out -> Failed
// 2. All tasks are done and at least one has failed or has been cancelled -> Failed
// 3. All tasks are done or are skipped (i.e. condition check failed).-> Success
// 4. A Task or Condition is running right now or there are things left to run -> Running
if pr.HasTimedOut(ctx, c) {
return &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: v1.PipelineRunReasonTimedOut.String(),
Message: fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.PipelineTimeout(ctx).String()),
}
}
// The tasks-level timeout (pipeline-level timeout handled above) also fails the run.
if pr.HaveTasksTimedOut(ctx, c) {
return &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: v1.PipelineRunReasonTimedOut.String(),
Message: fmt.Sprintf("PipelineRun %q failed due to tasks failed to finish within %q", pr.Name, pr.TasksTimeout().Duration.String()),
}
}
// report the count in PipelineRun Status
// get the count of successful tasks, failed tasks, cancelled tasks, skipped task, and incomplete tasks
s := facts.getPipelineTasksCount()
// completed task is a collection of successful, failed, cancelled tasks
// (skipped tasks and validation failed tasks are reported separately)
cmTasks := s.Succeeded + s.Failed + s.Cancelled + s.IgnoredFailed
totalFailedTasks := s.Failed + s.IgnoredFailed
// The completion reason is set from the TaskRun completion reason
// by default, set it to ReasonRunning
reason := v1.PipelineRunReasonRunning.String()
// check if the pipeline is finished executing all tasks i.e. no incomplete tasks
if s.Incomplete == 0 {
// NOTE: reason/status here deliberately shadow the outer reason; this branch
// always returns, so the outer reason is only used by the still-running path below.
status := corev1.ConditionTrue
reason := v1.PipelineRunReasonSuccessful.String()
var message string
if s.IgnoredFailed > 0 {
message = fmt.Sprintf("Tasks Completed: %d (Failed: %d (Ignored: %d), Cancelled %d), Skipped: %d",
cmTasks, totalFailedTasks, s.IgnoredFailed, s.Cancelled, s.Skipped)
} else {
message = fmt.Sprintf("Tasks Completed: %d (Failed: %d, Cancelled %d), Skipped: %d",
cmTasks, totalFailedTasks, s.Cancelled, s.Skipped)
}
// append validation failed count in the message
if s.ValidationFailed > 0 {
message += fmt.Sprintf(", Failed Validation: %d", s.ValidationFailed)
}
// Set reason to ReasonCompleted - At least one is skipped
if s.Skipped > 0 {
reason = v1.PipelineRunReasonCompleted.String()
}
// Failure reasons take precedence in the order below; the last matching case wins
// nothing here — switch evaluates top-down and stops at the first match.
switch {
case s.ValidationFailed > 0:
reason = v1.PipelineRunReasonFailedValidation.String()
status = corev1.ConditionFalse
case s.Failed > 0 || s.SkippedDueToTimeout > 0:
// Set reason to ReasonFailed - At least one failed
reason = v1.PipelineRunReasonFailed.String()
status = corev1.ConditionFalse
case pr.IsGracefullyCancelled() || pr.IsGracefullyStopped():
// Set reason to ReasonCancelled - Cancellation requested
reason = v1.PipelineRunReasonCancelled.String()
status = corev1.ConditionFalse
message = fmt.Sprintf("PipelineRun %q was cancelled", pr.Name)
case s.Cancelled > 0:
// Set reason to ReasonCancelled - At least one is cancelled and no failure yet
reason = v1.PipelineRunReasonCancelled.String()
status = corev1.ConditionFalse
}
logger.Infof("All child PipelineRuns/TaskRuns/CustomRuns have finished for PipelineRun %s so it has finished", pr.Name)
return &apis.Condition{
Type: apis.ConditionSucceeded,
Status: status,
Reason: reason,
Message: message,
}
}
// Hasn't timed out; not all tasks have finished.... Must keep running then....
switch {
case pr.IsGracefullyCancelled():
// Transition pipeline into running finally state, when graceful cancel is in progress
reason = v1.PipelineRunReasonCancelledRunningFinally.String()
case pr.IsGracefullyStopped():
// Transition pipeline into running finally state, when graceful stop is in progress
reason = v1.PipelineRunReasonStoppedRunningFinally.String()
case s.Cancelled > 0 || (s.Failed > 0 && facts.checkFinalTasksDone()):
// Transition pipeline into stopping state when one of the tasks(dag/final) cancelled or one of the dag tasks failed
// for a pipeline with final tasks, single dag task failure does not transition to interim stopping state
// pipeline stays in running state until all final tasks are done before transitioning to failed state
reason = v1.PipelineRunReasonStopping.String()
}
// return the status
return &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
Reason: reason,
Message: fmt.Sprintf("Tasks Completed: %d (Failed: %d, Cancelled %d), Incomplete: %d, Skipped: %d",
cmTasks, s.Failed, s.Cancelled, s.Incomplete, s.Skipped),
}
}
// GetSkippedTasks constructs a list of SkippedTask structs to be included in the
// PipelineRun Status.
func (facts *PipelineRunFacts) GetSkippedTasks() []v1.SkippedTask {
	var skipped []v1.SkippedTask
	for _, rpt := range facts.State {
		// Evaluate each skip predicate once per task instead of three times;
		// Skip/IsFinallySkipped walk the facts and are deterministic within a
		// reconcile, so hoisting them preserves behavior.
		if skip := rpt.Skip(facts); skip.IsSkipped {
			skipped = append(skipped, v1.SkippedTask{
				Name:            rpt.PipelineTask.Name,
				Reason:          skip.SkippingReason,
				WhenExpressions: rpt.PipelineTask.When,
			})
		}
		if finallySkip := rpt.IsFinallySkipped(facts); finallySkip.IsSkipped {
			skippedTask := v1.SkippedTask{
				Name:   rpt.PipelineTask.Name,
				Reason: finallySkip.SkippingReason,
			}
			// include the when expressions only when the finally task was skipped because
			// its when expressions evaluated to false (not because results variables were missing)
			if finallySkip.SkippingReason == v1.WhenExpressionsSkip {
				skippedTask.WhenExpressions = rpt.PipelineTask.When
			}
			skipped = append(skipped, skippedTask)
		}
	}
	return skipped
}
// GetPipelineTaskStatus returns the status of a PipelineTask depending on its child
// PipelineRun/TaskRun/CustomRun. The checks are implemented such that the finally tasks
// are requesting status of the dag tasks.
func (facts *PipelineRunFacts) GetPipelineTaskStatus() map[string]string {
// construct a map of tasks.<pipelineTask>.status and its state
tStatus := make(map[string]string)
for _, t := range facts.State {
if facts.isDAGTask(t.PipelineTask.Name) {
var s string
switch {
// execution status is Succeeded when a task has succeeded condition with status set to true
case t.isSuccessful():
s = v1.TaskRunReasonSuccessful.String()
// execution status is Failed when a task has succeeded condition with status set to false
case t.haveAnyRunsFailed():
s = v1.TaskRunReasonFailed.String()
default:
// None includes skipped as well
s = PipelineTaskStateNone
}
tStatus[PipelineTaskStatusPrefix+t.PipelineTask.Name+PipelineTaskStatusSuffix] = s
tStatus[PipelineTaskStatusPrefix+t.PipelineTask.Name+PipelineTaskReasonSuffix] = t.getReason()
}
}
// initialize aggregate status of all dag tasks to None
aggregateStatus := PipelineTaskStateNone
if facts.checkDAGTasksDone() {
// all dag pipeline tasks are done, change the aggregate status to succeeded
// will reset it to failed/skipped if needed
aggregateStatus = v1.PipelineRunReasonSuccessful.String()
for _, t := range facts.State {
if facts.isDAGTask(t.PipelineTask.Name) {
// if any of the dag pipeline tasks failed, change the aggregate status to failed and return
if t.IsChildPipeline() && t.haveAnyChildPipelineRunsFailed() {
aggregateStatus = v1.PipelineRunReasonFailed.String()
break
}
if t.IsCustomTask() && t.haveAnyCustomRunsFailed() {
aggregateStatus = v1.PipelineRunReasonFailed.String()
break
}
// if it's not a custom task or a child pipeline it's a task so we only
// need to check if any TaskRuns failed
// NOTE(review): this check also runs for non-failed custom tasks and child
// pipelines; presumably those have no TaskRuns so it is a no-op — TODO confirm.
if t.haveAnyTaskRunsFailed() {
aggregateStatus = v1.PipelineRunReasonFailed.String()
break
}
// if any of the dag task skipped, change the aggregate status to completed
// but continue checking for any other failure
if t.Skip(facts).IsSkipped {
aggregateStatus = v1.PipelineRunReasonCompleted.String()
}
}
}
}
tStatus[v1.PipelineTasksAggregateStatus] = aggregateStatus
return tStatus
}
// GetPipelineFinalTaskStatus returns a map of tasks.<pipelineTask>.status entries
// describing the execution status of each finally task.
func (facts *PipelineRunFacts) GetPipelineFinalTaskStatus() map[string]string {
	tStatus := make(map[string]string)
	for _, rpt := range facts.State {
		if !facts.isFinalTask(rpt.PipelineTask.Name) {
			continue
		}
		// Succeeded when the succeeded condition is true, Failed when it is false;
		// anything else (including skipped) is reported as None.
		status := PipelineTaskStateNone
		switch {
		case rpt.isSuccessful():
			status = v1.TaskRunReasonSuccessful.String()
		case rpt.haveAnyRunsFailed():
			status = v1.TaskRunReasonFailed.String()
		}
		tStatus[PipelineTaskStatusPrefix+rpt.PipelineTask.Name+PipelineTaskStatusSuffix] = status
	}
	return tStatus
}
// completedOrSkippedDAGTasks returns the names of all DAG PipelineTasks in state
// which have completed or been skipped.
func (facts *PipelineRunFacts) completedOrSkippedDAGTasks() []string {
	done := []string{}
	for _, rpt := range facts.State {
		if facts.isDAGTask(rpt.PipelineTask.Name) && rpt.isDone(facts) {
			done = append(done, rpt.PipelineTask.Name)
		}
	}
	return done
}
// checkTasksDone returns true if every task from the specified graph has finished
// executing; a task counts as done when it has failed, succeeded, or been skipped.
func (facts *PipelineRunFacts) checkTasksDone(d *dag.Graph) bool {
	for _, rpt := range facts.State {
		if isTaskInGraph(rpt.PipelineTask.Name, d) && !rpt.isDone(facts) {
			return false
		}
	}
	return true
}
// checkDAGTasksDone reports whether all DAG tasks are done executing
// (succeeded, failed, or skipped).
func (facts *PipelineRunFacts) checkDAGTasksDone() bool {
return facts.checkTasksDone(facts.TasksGraph)
}
// checkFinalTasksDone reports whether all finally tasks are done executing
// (succeeded or failed).
func (facts *PipelineRunFacts) checkFinalTasksDone() bool {
return facts.checkTasksDone(facts.FinalTasksGraph)
}
// getPipelineTasksCount tallies the state of every pipeline task, returning the count
// of successful, failed, cancelled, skipped, and incomplete tasks, plus the
// skipped-due-to-timeout, ignored-failure, and validation-failure subcounts.
func (facts *PipelineRunFacts) getPipelineTasksCount() pipelineRunStatusCount {
	// The zero value already has every counter at 0; spelling out each field
	// explicitly (as before) was redundant.
	var s pipelineRunStatusCount
	for _, t := range facts.State {
		// NOTE: case order matters — e.g. a task cancelled for a timeout must be
		// counted as failed before the generic cancelled case can match it.
		switch {
		// increment success counter since the task is successful
		case t.isSuccessful():
			s.Succeeded++
		// increment failure counter since the task is cancelled due to a timeout
		case t.isCancelledForTimeOut():
			s.Failed++
		// increment cancelled counter since the task is cancelled
		case t.isCancelled():
			s.Cancelled++
		// increment failure counter based on Task OnError type since the task has failed
		case t.isFailure():
			if t.PipelineTask.OnError == v1.PipelineTaskContinue {
				s.IgnoredFailed++
			} else {
				s.Failed++
			}
		case t.isValidationFailed(facts.ValidationFailedTask):
			s.ValidationFailed++
		// increment skipped and skipped due to timeout counters since the task was skipped
		// due to the pipeline, tasks, or finally timeout being reached before it launched
		case t.Skip(facts).SkippingReason == v1.PipelineTimedOutSkip ||
			t.Skip(facts).SkippingReason == v1.TasksTimedOutSkip ||
			t.IsFinallySkipped(facts).SkippingReason == v1.FinallyTimedOutSkip:
			s.Skipped++
			s.SkippedDueToTimeout++
		// increment skip counter since the task is skipped
		case t.Skip(facts).IsSkipped:
			s.Skipped++
		// checking if any finally tasks were referring to invalid/missing task results
		case t.IsFinallySkipped(facts).IsSkipped:
			s.Skipped++
		// increment incomplete counter since the task is pending and not executed yet
		default:
			s.Incomplete++
		}
	}
	return s
}
// isDAGTask reports whether the named pipelineTask is defined under the tasks (DAG) section.
func (facts *PipelineRunFacts) isDAGTask(pipelineTaskName string) bool {
	// Return the membership test directly instead of the if-true/return-false dance.
	_, ok := facts.TasksGraph.Nodes[pipelineTaskName]
	return ok
}
// isFinalTask reports whether the named pipelineTask is defined under the finally section.
func (facts *PipelineRunFacts) isFinalTask(pipelineTaskName string) bool {
	// Return the membership test directly instead of the if-true/return-false dance.
	_, ok := facts.FinalTasksGraph.Nodes[pipelineTaskName]
	return ok
}
// isTaskInGraph reports whether the named PipelineTask belongs to the specified Graph.
func isTaskInGraph(pipelineTaskName string, d *dag.Graph) bool {
	// Return the membership test directly instead of the if-true/return-false dance.
	_, ok := d.Nodes[pipelineTaskName]
	return ok
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"encoding/json"
"errors"
"fmt"
"sort"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)
// ErrInvalidTaskResultReference indicates that the reason for the failure status is that there
// is an invalid task result reference
// NOTE(review): the error text starts with a capital letter, contrary to Go error-string
// convention (staticcheck ST1005); left unchanged since callers may match the exact message.
var ErrInvalidTaskResultReference = pipelineErrors.WrapUserError(errors.New("Invalid task result reference"))
// ResolvedResultRefs represents all of the ResolvedResultRef for a pipeline task
type ResolvedResultRefs []*ResolvedResultRef
// ResolvedResultRef represents a result ref reference that has been fully resolved (value has been populated).
// If the value is from a Result, then the ResultReference will be populated to point to the ResultReference
// which resulted in the value
type ResolvedResultRef struct {
// Value is the resolved result value.
Value v1.ResultValue
// ResultReference identifies the result this value was resolved from.
ResultReference v1.ResultRef
// FromTaskRun is the name of the TaskRun that produced the value; empty when the
// value came from a CustomRun.
FromTaskRun string
// FromRun is the name of the CustomRun that produced the value; empty when the
// value came from a TaskRun.
FromRun string
}
// ResolveResultRef resolves any result references found in the target
// ResolvedPipelineTask, deduplicated. On failure the name of the pipeline task
// whose reference could not be resolved is also returned.
func ResolveResultRef(pipelineRunState PipelineRunState, target *ResolvedPipelineTask) (ResolvedResultRefs, string, error) {
	refs, failedTask, err := convertToResultRefs(pipelineRunState, target)
	if err != nil {
		return nil, failedTask, err
	}
	return removeDup(refs), "", nil
}
// ResolveResultRefs resolves any result references found in the target
// ResolvedPipelineTasks, deduplicated across all targets. On failure the name of
// the pipeline task whose reference could not be resolved is also returned.
func ResolveResultRefs(pipelineRunState PipelineRunState, targets PipelineRunState) (ResolvedResultRefs, string, error) {
	var combined ResolvedResultRefs
	for _, target := range targets {
		refs, failedTask, err := convertToResultRefs(pipelineRunState, target)
		if err != nil {
			return nil, failedTask, err
		}
		combined = append(combined, refs...)
	}
	return removeDup(combined), "", nil
}
// validateArrayResultsIndex checks whether any array result indexing reference is
// out of bounds for the size of the referenced array result.
func validateArrayResultsIndex(allResolvedResultRefs ResolvedResultRefs) error {
	for _, ref := range allResolvedResultRefs {
		if ref.Value.Type != v1.ParamTypeArray {
			continue
		}
		idx := ref.ResultReference.ResultsIndex
		if idx != nil && *idx >= len(ref.Value.ArrayVal) {
			return fmt.Errorf("array Result Index %d for Task %s Result %s is out of bound of size %d", *idx, ref.ResultReference.PipelineTask, ref.ResultReference.Result, len(ref.Value.ArrayVal))
		}
	}
	return nil
}
// removeDup deduplicates the given resolved result references by their ResultRef,
// returning them in a deterministic order sorted by pipeline task name and then by
// result name. Returns nil when given nil.
func removeDup(refs ResolvedResultRefs) ResolvedResultRefs {
	if refs == nil {
		return nil
	}
	// The last occurrence of each ResultRef wins (map overwrite semantics).
	resolvedResultRefByRef := make(map[v1.ResultRef]*ResolvedResultRef, len(refs))
	for _, resolvedResultRef := range refs {
		resolvedResultRefByRef[resolvedResultRef.ResultReference] = resolvedResultRef
	}
	// Sort the resulting keys to produce a deterministic ordering. The previous
	// comparator ("return false if either field compares greater") was not a valid
	// less function: for two refs where the PipelineTask and Result orderings
	// disagree it reported "not less" in both directions, leaving their relative
	// order unspecified. Compare field by field instead.
	order := make([]v1.ResultRef, 0, len(resolvedResultRefByRef))
	for key := range resolvedResultRefByRef {
		order = append(order, key)
	}
	sort.Slice(order, func(i, j int) bool {
		if order[i].PipelineTask != order[j].PipelineTask {
			return order[i].PipelineTask < order[j].PipelineTask
		}
		return order[i].Result < order[j].Result
	})
	deduped := make([]*ResolvedResultRef, 0, len(order))
	for _, key := range order {
		deduped = append(deduped, resolvedResultRefByRef[key])
	}
	return deduped
}
// convertToResultRefs walks a PipelineTask looking for result references. If any are
// found they are resolved to a value by searching pipelineRunState. The list of resolved
// references are returned. If an error is encountered due to an invalid result reference
// then a nil list and error is returned instead.
func convertToResultRefs(pipelineRunState PipelineRunState, target *ResolvedPipelineTask) (ResolvedResultRefs, string, error) {
var resolvedResultRefs ResolvedResultRefs
for _, resultRef := range v1.PipelineTaskResultRefs(target.PipelineTask) {
// The producing task must exist in the run state...
referencedPipelineTask := pipelineRunState.ToMap()[resultRef.PipelineTask]
if referencedPipelineTask == nil {
return nil, resultRef.PipelineTask, fmt.Errorf("could not find task %q referenced by result", resultRef.PipelineTask)
}
// ...and must have finished (succeeded or failed) before its results can be read.
if !referencedPipelineTask.isSuccessful() && !referencedPipelineTask.isFailure() {
return nil, resultRef.PipelineTask, fmt.Errorf("task %q referenced by result was not finished", referencedPipelineTask.PipelineTask.Name)
}
// Custom Task
switch {
case referencedPipelineTask.IsCustomTask():
resolved, err := resolveCustomResultRef(referencedPipelineTask.CustomRuns, resultRef)
if err != nil {
return nil, resultRef.PipelineTask, err
}
resolvedResultRefs = append(resolvedResultRefs, resolved)
default:
// Matrixed referenced Pipeline Task
if referencedPipelineTask.PipelineTask.IsMatrixed() {
// The matrix result is aggregated into one array value, then attributed
// to every TaskRun of the matrix.
arrayValues, err := findResultValuesForMatrix(referencedPipelineTask, resultRef)
if err != nil {
return nil, resultRef.PipelineTask, err
}
for _, taskRun := range referencedPipelineTask.TaskRuns {
resolved := createMatrixedTaskResultForParam(taskRun.Name, arrayValues, resultRef)
resolvedResultRefs = append(resolvedResultRefs, resolved)
}
} else {
// Regular PipelineTask
resolved, err := resolveResultRef(referencedPipelineTask.TaskRuns, resultRef)
if err != nil {
return nil, resultRef.PipelineTask, err
}
resolvedResultRefs = append(resolvedResultRefs, resolved)
}
}
}
return resolvedResultRefs, "", nil
}
// resolveCustomResultRef resolves a result reference against the first CustomRun of
// a custom task, returning the resolved value and its provenance.
func resolveCustomResultRef(customRuns []*v1beta1.CustomRun, resultRef *v1.ResultRef) (*ResolvedResultRef, error) {
	run := customRuns[0]
	rawValue, err := findRunResultForParam(run, resultRef)
	if err != nil {
		return nil, err
	}
	resolved := ResolvedResultRef{
		Value:           *paramValueFromCustomRunResult(rawValue),
		FromTaskRun:     "",
		FromRun:         run.GetObjectMeta().GetName(),
		ResultReference: *resultRef,
	}
	return &resolved, nil
}
// paramValueFromCustomRunResult converts a CustomRun result string into a ParamValue.
// Fan-out array results arrive serialized as JSON; when the string parses as a
// non-empty JSON string array it becomes an array-typed value, otherwise it is
// treated as a plain string.
func paramValueFromCustomRunResult(result string) *v1.ParamValue {
	var elements []string
	if err := json.Unmarshal([]byte(result), &elements); err == nil && len(elements) > 0 {
		if len(elements) == 1 {
			return &v1.ParamValue{
				Type:     v1.ParamTypeArray,
				ArrayVal: []string{elements[0]},
			}
		}
		return v1.NewStructuredValues(elements[0], elements[1:]...)
	}
	return v1.NewStructuredValues(result)
}
// resolveResultRef resolves a result reference against the first TaskRun of a
// regular pipeline task, returning the resolved value and its provenance.
func resolveResultRef(taskRuns []*v1.TaskRun, resultRef *v1.ResultRef) (*ResolvedResultRef, error) {
	run := taskRuns[0]
	value, err := findTaskResultForParam(run, resultRef)
	if err != nil {
		return nil, err
	}
	return &ResolvedResultRef{
		Value:           value,
		FromTaskRun:     run.Name,
		FromRun:         "",
		ResultReference: *resultRef,
	}, nil
}
// findRunResultForParam returns the value of the CustomRun result named by the
// reference, or an ErrInvalidTaskResultReference-wrapped error when it is absent.
func findRunResultForParam(customRun *v1beta1.CustomRun, reference *v1.ResultRef) (string, error) {
	for _, res := range customRun.Status.Results {
		if res.Name != reference.Result {
			continue
		}
		return res.Value, nil
	}
	return "", fmt.Errorf("%w: Could not find result with name %s for pipeline task %s", ErrInvalidTaskResultReference, reference.Result, reference.PipelineTask)
}
// findTaskResultForParam returns the value of the TaskRun result named by the
// reference, or an ErrInvalidTaskResultReference-wrapped error when it is absent.
func findTaskResultForParam(taskRun *v1.TaskRun, reference *v1.ResultRef) (v1.ResultValue, error) {
	for _, res := range taskRun.Status.TaskRunStatusFields.Results {
		if res.Name == reference.Result {
			return res.Value, nil
		}
	}
	return v1.ResultValue{}, fmt.Errorf("%w: Could not find result with name %s for pipeline task %s", ErrInvalidTaskResultReference, reference.Result, reference.PipelineTask)
}
// findResultValuesForMatrix looks up the aggregated array value for resultRef in the
// referenced matrixed task's results cache, lazily building the cache first if it is
// empty so subsequent tasks can reuse it.
func findResultValuesForMatrix(referencedPipelineTask *ResolvedPipelineTask, resultRef *v1.ResultRef) (v1.ParamValue, error) {
	// Assign the freshly built cache directly; the previous pointer-to-map
	// (*map[string][]string) indirection served no purpose since maps are
	// already reference types.
	if len(referencedPipelineTask.ResultsCache) == 0 {
		referencedPipelineTask.ResultsCache = createResultsCacheMatrixedTaskRuns(referencedPipelineTask)
	}
	if arrayValues, ok := referencedPipelineTask.ResultsCache[resultRef.Result]; ok {
		return v1.ParamValue{
			Type:     v1.ParamTypeArray,
			ArrayVal: arrayValues,
		}, nil
	}
	return v1.ParamValue{}, fmt.Errorf("%w: Could not find result with name %s for task %s", ErrInvalidTaskResultReference, resultRef.Result, resultRef.PipelineTask)
}
// createMatrixedTaskResultForParam wraps the aggregated matrix result value in a
// ResolvedResultRef attributed to the given TaskRun.
func createMatrixedTaskResultForParam(taskRunName string, paramValue v1.ParamValue, resultRef *v1.ResultRef) *ResolvedResultRef {
	resolved := ResolvedResultRef{
		Value:           paramValue,
		FromTaskRun:     taskRunName,
		FromRun:         "",
		ResultReference: *resultRef,
	}
	return &resolved
}
// getStringReplacements builds the variable-expansion map for string contexts:
// every array element, object value, and plain string value is mapped from each
// accepted reference spelling to its string value.
func (rs ResolvedResultRefs) getStringReplacements() map[string]string {
	replacements := map[string]string{}
	for _, ref := range rs {
		switch ref.Value.Type {
		case v1.ParamTypeArray:
			for idx := 0; idx < len(ref.Value.ArrayVal); idx++ {
				for _, target := range ref.getReplaceTargetfromArrayIndex(idx) {
					replacements[target] = ref.Value.ArrayVal[idx]
				}
			}
		case v1.ParamTypeObject:
			for key, element := range ref.Value.ObjectVal {
				for _, target := range ref.getReplaceTargetfromObjectKey(key) {
					replacements[target] = element
				}
			}
		default:
			// v1.ParamTypeString and any unknown type fall back to the string value.
			for _, target := range ref.getReplaceTarget() {
				replacements[target] = ref.Value.StringVal
			}
		}
	}
	return replacements
}
// getArrayReplacements builds the variable-expansion map for array contexts,
// mapping each accepted reference spelling to the whole array value.
func (rs ResolvedResultRefs) getArrayReplacements() map[string][]string {
	replacements := map[string][]string{}
	for _, ref := range rs {
		if ref.Value.Type != v1.ParamType(v1.ResultsTypeArray) {
			continue
		}
		for _, target := range ref.getReplaceTarget() {
			replacements[target] = ref.Value.ArrayVal
		}
	}
	return replacements
}
// getObjectReplacements builds the variable-expansion map for object contexts,
// mapping each accepted reference spelling to the whole object value.
func (rs ResolvedResultRefs) getObjectReplacements() map[string]map[string]string {
	replacements := map[string]map[string]string{}
	for _, ref := range rs {
		if ref.Value.Type != v1.ParamType(v1.ResultsTypeObject) {
			continue
		}
		for _, target := range ref.getReplaceTarget() {
			replacements[target] = ref.Value.ObjectVal
		}
	}
	return replacements
}
// getReplaceTarget returns the three accepted spellings of a task-result
// variable for this reference: dotted, double-quoted bracket, and
// single-quoted bracket.
func (r *ResolvedResultRef) getReplaceTarget() []string {
	task := r.ResultReference.PipelineTask
	result := r.ResultReference.Result
	return []string{
		fmt.Sprintf("%s.%s.%s.%s", v1.ResultTaskPart, task, v1.ResultResultPart, result),
		fmt.Sprintf("%s.%s.%s[%q]", v1.ResultTaskPart, task, v1.ResultResultPart, result),
		fmt.Sprintf("%s.%s.%s['%s']", v1.ResultTaskPart, task, v1.ResultResultPart, result),
	}
}
// getReplaceTargetfromArrayIndex returns the three accepted spellings of an
// indexed task-result variable (dotted, double-quoted bracket, single-quoted
// bracket) for element idx of an array result.
func (r *ResolvedResultRef) getReplaceTargetfromArrayIndex(idx int) []string {
	task := r.ResultReference.PipelineTask
	result := r.ResultReference.Result
	return []string{
		fmt.Sprintf("%s.%s.%s.%s[%d]", v1.ResultTaskPart, task, v1.ResultResultPart, result, idx),
		fmt.Sprintf("%s.%s.%s[%q][%d]", v1.ResultTaskPart, task, v1.ResultResultPart, result, idx),
		fmt.Sprintf("%s.%s.%s['%s'][%d]", v1.ResultTaskPart, task, v1.ResultResultPart, result, idx),
	}
}
// getReplaceTargetfromObjectKey returns the three accepted spellings of an
// object-key task-result variable (dotted, double-quoted bracket,
// single-quoted bracket) for the given key of an object result.
func (r *ResolvedResultRef) getReplaceTargetfromObjectKey(key string) []string {
	task := r.ResultReference.PipelineTask
	result := r.ResultReference.Result
	return []string{
		fmt.Sprintf("%s.%s.%s.%s.%s", v1.ResultTaskPart, task, v1.ResultResultPart, result, key),
		fmt.Sprintf("%s.%s.%s[%q][%s]", v1.ResultTaskPart, task, v1.ResultResultPart, result, key),
		fmt.Sprintf("%s.%s.%s['%s'][%s]", v1.ResultTaskPart, task, v1.ResultResultPart, result, key),
	}
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"fmt"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// ValidatePipelineTaskResults ensures that any result references used by pipeline tasks
// resolve to valid results. This catches references to a PipelineTask that doesn't
// exist as well as references to a result name the referenced task never returns
// (e.g. a misspelled result name).
func ValidatePipelineTaskResults(state PipelineRunState) error {
	tasks := state.ToMap()
	for _, rpt := range state {
		refs := v1.PipelineTaskResultRefs(rpt.PipelineTask)
		for _, ref := range refs {
			err := validateResultRef(ref, tasks)
			if err != nil {
				// A bad reference is an authoring mistake, so surface it as a user error.
				return pipelineErrors.WrapUserError(fmt.Errorf("invalid result reference in pipeline task %q: %w", rpt.PipelineTask.Name, err))
			}
		}
	}
	return nil
}
// ValidatePipelineResults ensures that any result references used by PipelineResults
// resolve to valid results. This prevents a situation where a PipelineResult references
// a result in a PipelineTask that doesn't exist or where the user has either misspelled
// a result name or the referenced task just doesn't return a result with that name.
func ValidatePipelineResults(ps *v1.PipelineSpec, state PipelineRunState) error {
	ptMap := state.ToMap()
	for _, result := range ps.Results {
		expressions, _ := result.GetVarSubstitutionExpressions()
		refs := v1.NewResultRefs(expressions)
		for _, ref := range refs {
			if err := validateResultRef(ref, ptMap); err != nil {
				// Wrap as a user error for consistency with ValidatePipelineTaskResults:
				// a bad result reference is a pipeline-authoring mistake, not a
				// controller failure.
				return pipelineErrors.WrapUserError(fmt.Errorf("invalid pipeline result %q: %w", result.Name, err))
			}
		}
	}
	return nil
}
// validateResultRef checks a single ResultRef against the given map of
// PipelineTask name to ResolvedPipelineTask. It returns an error when the
// reference points at a pipeline task that doesn't exist, whose task spec is
// unavailable, or that doesn't declare a result with the referenced name.
func validateResultRef(ref *v1.ResultRef, ptMap map[string]*ResolvedPipelineTask) error {
	rpt, exists := ptMap[ref.PipelineTask]
	if !exists {
		return fmt.Errorf("referenced pipeline task %q does not exist", ref.PipelineTask)
	}
	if rpt.CustomTask {
		// Results pointing at custom tasks cannot be validated up front:
		// their result names are unknown until the custom task runs.
		return nil
	}
	if rpt.ResolvedTask == nil || rpt.ResolvedTask.TaskSpec == nil {
		return fmt.Errorf("unable to validate result referencing pipeline task %q: task spec not found", ref.PipelineTask)
	}
	for _, taskResult := range rpt.ResolvedTask.TaskSpec.Results {
		if taskResult.Name == ref.Result {
			return nil
		}
	}
	return fmt.Errorf("%q is not a named result returned by pipeline task %q", ref.Result, ref.PipelineTask)
}
// ValidateOptionalWorkspaces validates that any workspaces the Pipeline marks
// as optional are also optional in the Tasks that receive them. Otherwise a
// Task could require a workspace the Pipeline never guarantees to provide at
// runtime.
func ValidateOptionalWorkspaces(pipelineWorkspaces []v1.PipelineWorkspaceDeclaration, state PipelineRunState) error {
	optional := sets.NewString()
	for _, ws := range pipelineWorkspaces {
		if ws.Optional {
			optional.Insert(ws.Name)
		}
	}
	for _, rpt := range state {
		// Without a resolved task spec there is nothing to cross-check.
		if rpt.ResolvedTask == nil || rpt.ResolvedTask.TaskSpec == nil {
			continue
		}
		for _, pws := range rpt.PipelineTask.Workspaces {
			if !optional.Has(pws.Workspace) {
				continue
			}
			for _, tws := range rpt.ResolvedTask.TaskSpec.Workspaces {
				if tws.Name == pws.Name && !tws.Optional {
					return fmt.Errorf("pipeline workspace %q is marked optional but pipeline task %q requires it be provided", pws.Workspace, rpt.PipelineTask.Name)
				}
			}
		}
	}
	return nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"fmt"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/list"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun"
trresources "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
)
// ValidateParamTypesMatching validate that parameters in PipelineRun override corresponding parameters in Pipeline of the same type.
func ValidateParamTypesMatching(p *v1.PipelineSpec, pr *v1.PipelineRun) error {
	// Index the declared parameter types by name.
	declaredTypes := make(map[string]v1.ParamType, len(p.Params))
	for _, param := range p.Params {
		declaredTypes[param.Name] = param.Type
	}
	// Collect every PipelineRun parameter whose type disagrees with its declaration.
	var mismatched []string
	for _, param := range pr.Spec.Params {
		if declared, found := declaredTypes[param.Name]; found && param.Value.Type != declared {
			mismatched = append(mismatched, param.Name)
		}
	}
	if len(mismatched) > 0 {
		return pipelineErrors.WrapUserError(fmt.Errorf("parameters have inconsistent types : %s", mismatched))
	}
	return nil
}
// ValidateRequiredParametersProvided validates that all the parameters expected by the Pipeline are provided by the PipelineRun.
// Extra Parameters are allowed, the Pipeline will use the Parameters it needs and ignore the other Parameters.
func ValidateRequiredParametersProvided(pipelineParameters *v1.ParamSpecs, pipelineRunParameters *v1.Params) error {
	// Names supplied by the PipelineRun.
	var provided []string
	for _, param := range *pipelineRunParameters {
		provided = append(provided, param.Name)
	}
	// Names the Pipeline requires, i.e. those declared without a default value.
	var required []string
	for _, param := range *pipelineParameters {
		if param.Default == nil {
			required = append(required, param.Name)
		}
	}
	// Anything required but not provided is an authoring/user error.
	if missing := list.DiffLeft(required, provided); len(missing) > 0 {
		return pipelineErrors.WrapUserError(fmt.Errorf("pipelineRun missing parameters: %s", missing))
	}
	return nil
}
// ValidateObjectParamRequiredKeys validates that the required keys of all the object parameters expected by the Pipeline are provided by the PipelineRun.
func ValidateObjectParamRequiredKeys(pipelineParameters []v1.ParamSpec, pipelineRunParameters []v1.Param) error {
	if missing := taskrun.MissingKeysObjectParamNames(pipelineParameters, pipelineRunParameters); len(missing) > 0 {
		return pipelineErrors.WrapUserError(fmt.Errorf("pipelineRun missing object keys for parameters: %v", missing))
	}
	return nil
}
// ValidateParameterTypesInMatrix validates the type of Parameter for Matrix.Params
// and Matrix.Include.Params after any replacements are made from Task parameters
// or results. Matrix.Params must be of type array; Matrix.Include.Params must be
// of type string.
func ValidateParameterTypesInMatrix(state PipelineRunState) error {
	for _, rpt := range state {
		matrix := rpt.PipelineTask.Matrix
		if matrix.HasInclude() {
			for _, include := range matrix.Include {
				for _, param := range include.Params {
					if param.Value.Type != v1.ParamTypeString {
						return fmt.Errorf("parameters of type string only are allowed, but param \"%s\" has type \"%s\" in pipelineTask \"%s\"",
							param.Name, string(param.Value.Type), rpt.PipelineTask.Name)
					}
				}
			}
		}
		if !matrix.HasParams() {
			continue
		}
		for _, param := range matrix.Params {
			if param.Value.Type == v1.ParamTypeArray {
				continue
			}
			// A non-array value is still permitted when it consumes results
			// from a matrixed PipelineTask (result refs resolve later).
			if exprs, ok := param.GetVarSubstitutionExpressions(); ok && v1.LooksLikeContainsResultRefs(exprs) {
				continue
			}
			return fmt.Errorf("parameters of type array only are allowed, but param \"%s\" has type \"%s\" in pipelineTask \"%s\"",
				param.Name, string(param.Value.Type), rpt.PipelineTask.Name)
		}
	}
	return nil
}
// ValidateParamArrayIndex validates if the param reference to an array param is out of bound.
// error is returned when the array indexing reference is out of bound of the array param
// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2.
func ValidateParamArrayIndex(ps *v1.PipelineSpec, params v1.Params) error {
	arrayIndexRefs := ps.GetIndexingReferencesToArrayParams()
	return trresources.ValidateOutOfBoundArrayParams(ps.Params, params, arrayIndexRefs)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerun
import (
"context"
"encoding/json"
"fmt"
"log"
"strings"
"time"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
"go.uber.org/zap"
"gomodules.xyz/jsonpatch/v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
)
// Pre-marshalled JSON patches that set the cancelled status and the
// timed-out-by-pipeline message on a TaskRun / CustomRun spec.
var (
	timeoutTaskRunPatchBytes   []byte
	timeoutCustomRunPatchBytes []byte
)

func init() {
	// mustPatch builds the two-operation "add status + statusMessage" patch;
	// marshalling only fails on a programming error, hence log.Fatalf.
	mustPatch := func(kind, status, message string) []byte {
		patch, err := json.Marshal([]jsonpatch.JsonPatchOperation{
			{
				Operation: "add",
				Path:      "/spec/status",
				Value:     status,
			},
			{
				Operation: "add",
				Path:      "/spec/statusMessage",
				Value:     message,
			},
		})
		if err != nil {
			log.Fatalf("failed to marshal %s timeout patch bytes: %v", kind, err)
		}
		return patch
	}
	timeoutTaskRunPatchBytes = mustPatch("TaskRun", string(v1.TaskRunSpecStatusCancelled), v1.TaskRunCancelledByPipelineTimeoutMsg)
	timeoutCustomRunPatchBytes = mustPatch("CustomRun", string(v1beta1.CustomRunSpecStatusCancelled), v1beta1.CustomRunCancelledByPipelineTimeoutMsg)
}
// timeoutPipelineRun marks the PipelineRun as timed out and any resolved TaskRun(s) too.
func timeoutPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, clientSet clientset.Interface) error {
	if errs := timeoutPipelineTasks(ctx, logger, pr, clientSet); len(errs) > 0 {
		joined := strings.Join(errs, "\n")
		// Record on the PipelineRun that we could not time out its children.
		pr.Status.SetCondition(&apis.Condition{
			Type:    apis.ConditionSucceeded,
			Status:  corev1.ConditionUnknown,
			Reason:  v1.PipelineRunReasonCouldntTimeOut.String(),
			Message: fmt.Sprintf("PipelineRun %q was timed out but had errors trying to time out TaskRuns and/or Runs: %s", pr.Name, joined),
		})
		return fmt.Errorf("error(s) from timing out TaskRun(s) from PipelineRun %s: %s", pr.Name, joined)
	}
	// All TaskRuns and Runs were timed out; the PipelineRun itself is now timed out.
	pr.SetTimeoutCondition(ctx)
	pr.Status.CompletionTime = &metav1.Time{Time: time.Now()}
	return nil
}
// timeoutCustomRun patches the named CustomRun with the cancelled-by-timeout
// patch. A NotFound response is treated as success: the CustomRun is already gone.
func timeoutCustomRun(ctx context.Context, customRunName string, namespace string, clientSet clientset.Interface) error {
	if _, err := clientSet.TektonV1beta1().CustomRuns(namespace).Patch(ctx, customRunName, types.JSONPatchType, timeoutCustomRunPatchBytes, metav1.PatchOptions{}, ""); !errors.IsNotFound(err) {
		return err
	}
	return nil
}
// timeoutTaskRun patches the named TaskRun with the cancelled-by-timeout
// patch. A NotFound response is treated as success: the TaskRun is already gone.
func timeoutTaskRun(ctx context.Context, taskRunName string, namespace string, clientSet clientset.Interface) error {
	if _, err := clientSet.TektonV1().TaskRuns(namespace).Patch(ctx, taskRunName, types.JSONPatchType, timeoutTaskRunPatchBytes, metav1.PatchOptions{}, ""); !errors.IsNotFound(err) {
		return err
	}
	return nil
}
// timeoutPipelineTasks patches all of the PipelineRun's TaskRuns and CustomRuns
// with a cancelled status and a timeout message, returning any error strings.
func timeoutPipelineTasks(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, clientSet clientset.Interface) []string {
	// An empty name set means "time out every child".
	return timeoutPipelineTasksForTaskNames(ctx, logger, pr, clientSet, sets.NewString())
}
// timeoutPipelineTasksForTaskNames patches `TaskRun`s and `Run`s for the given task names, or all if no task names are given, with canceled status and appropriate message
func timeoutPipelineTasksForTaskNames(ctx context.Context, logger *zap.SugaredLogger, pr *v1.PipelineRun, clientSet clientset.Interface, taskNames sets.String) []string {
	errs := []string{}
	taskRunNames, customRunNames, err := getChildObjectsFromPRStatusForTaskNames(ctx, pr.Status, taskNames)
	if err != nil {
		errs = append(errs, err.Error())
	}
	for _, taskRunName := range taskRunNames {
		logger.Infof("patching TaskRun %s for timeout", taskRunName)
		patchErr := timeoutTaskRun(ctx, taskRunName, pr.Namespace, clientSet)
		if patchErr == nil {
			continue
		}
		// A completed TaskRun has an immutable spec, so this patch failure is
		// expected and safe to ignore.
		if pipelineErrors.IsImmutableTaskRunSpecError(patchErr) {
			continue
		}
		errs = append(errs, fmt.Errorf("failed to patch TaskRun `%s` with timeout: %w", taskRunName, patchErr).Error())
	}
	for _, customRunName := range customRunNames {
		logger.Infof("patching CustomRun %s for timeout", customRunName)
		if patchErr := timeoutCustomRun(ctx, customRunName, pr.Namespace, clientSet); patchErr != nil {
			errs = append(errs, fmt.Errorf("failed to patch CustomRun `%s` with timeout: %w", customRunName, patchErr).Error())
		}
	}
	return errs
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelinerun
import (
"context"
"encoding/json"
"errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
"knative.dev/pkg/logging"
)
const (
	// TracerName is the name under which this reconciler's spans are created.
	TracerName = "PipelineRunReconciler"
	// SpanContextAnnotation is the PipelineRun annotation holding a
	// JSON-encoded SpanContext map (read by initTracing).
	SpanContextAnnotation = "tekton.dev/pipelinerunSpanContext"
	// TaskRunSpanContextAnnotation is the annotation used for propagating the
	// SpanContext to TaskRuns.
	TaskRunSpanContextAnnotation = "tekton.dev/taskrunSpanContext"
)
// initTracing returns a context carrying the span context for this
// PipelineRun's trace. A span context already recorded on the status, or
// propagated through the SpanContextAnnotation annotation, is restored;
// otherwise a new root span is started and its span context is stored on the
// PipelineRun status.
func initTracing(ctx context.Context, tracerProvider trace.TracerProvider, pr *v1.PipelineRun) context.Context {
	logger := logging.FromContext(ctx)
	pro := otel.GetTextMapPropagator()
	// SpanContext was created already
	if len(pr.Status.SpanContext) > 0 {
		return pro.Extract(ctx, propagation.MapCarrier(pr.Status.SpanContext))
	}
	spanContext := make(map[string]string)
	// SpanContext was propagated through annotations
	if pr.Annotations != nil && pr.Annotations[SpanContextAnnotation] != "" {
		err := json.Unmarshal([]byte(pr.Annotations[SpanContextAnnotation]), &spanContext)
		if err != nil {
			// Errorf, not Error: SugaredLogger.Error does Sprint-style
			// concatenation and would log the "%s" verb literally.
			logger.Errorf("unable to unmarshal spancontext, err: %s", err)
		}
		pr.Status.SpanContext = spanContext
		return pro.Extract(ctx, propagation.MapCarrier(pr.Status.SpanContext))
	}
	// Create a new root span since there was no parent spanContext provided through annotations
	ctxWithTrace, span := tracerProvider.Tracer(TracerName).Start(ctx, "PipelineRun:Reconciler")
	defer span.End()
	span.SetAttributes(attribute.String("pipelinerun", pr.Name), attribute.String("namespace", pr.Namespace))
	pro.Inject(ctxWithTrace, propagation.MapCarrier(spanContext))
	logger.Debug("got tracing carrier", spanContext)
	if len(spanContext) == 0 {
		// An empty carrier means the provider did not produce a traceId
		// (e.g. a no-op provider), so keep the original context.
		logger.Debug("tracerProvider doesn't provide a traceId, tracing is disabled")
		return ctx
	}
	span.AddEvent("updating PipelineRun status with SpanContext")
	pr.Status.SpanContext = spanContext
	return ctxWithTrace
}
// getMarshalledSpanFromContext extracts the span context from ctx and returns
// it as a JSON-encoded string. It errors when no span context is present or
// when the encoded form is 1024 bytes or larger.
func getMarshalledSpanFromContext(ctx context.Context) (string, error) {
	carrier := map[string]string{}
	otel.GetTextMapPropagator().Inject(ctx, propagation.MapCarrier(carrier))
	if len(carrier) == 0 {
		return "", errors.New("spanContext not present in the context, unable to marshall")
	}
	encoded, err := json.Marshal(carrier)
	if err != nil {
		return "", err
	}
	// Cap the size of the span context we persist.
	if len(encoded) >= 1024 {
		return "", errors.New("marshalled spanContext size is too big")
	}
	return string(encoded), nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolutionrequest
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
resolutionrequestinformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
resolutionrequestreconciler "github.com/tektoncd/pipeline/pkg/client/resolution/injection/reconciler/resolution/v1beta1/resolutionrequest"
"k8s.io/utils/clock"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
)
// NewController returns a func that returns a knative controller for processing
// ResolutionRequest objects.
func NewController(clock clock.PassiveClock) func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		logger := logging.FromContext(ctx)
		configStore := config.NewStore(logger.Named("config-store"))
		configStore.WatchConfigs(cmw)
		r := &Reconciler{
			clock: clock,
		}
		impl := resolutionrequestreconciler.NewImpl(ctx, r, func(impl *controller.Impl) controller.Options {
			return controller.Options{
				ConfigStore: configStore,
			}
		})
		reqinformer := resolutionrequestinformer.Get(ctx)
		// Re-enqueue a ResolutionRequest whenever it changes.
		// Note: %v, not %w — the %w verb is only interpreted by fmt.Errorf;
		// Panicf would render it as "%!w(...)".
		if _, err := reqinformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register ResolutionRequest informer event handler: %v", err)
		}
		return impl
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolutionrequest
import (
"context"
"fmt"
"time"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
rrreconciler "github.com/tektoncd/pipeline/pkg/client/resolution/injection/reconciler/resolution/v1beta1/resolutionrequest"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
"knative.dev/pkg/controller"
"knative.dev/pkg/reconciler"
)
// Reconciler is a knative reconciler for processing ResolutionRequest
// objects
type Reconciler struct {
	// clock is the time source injected at construction.
	// NOTE(review): not referenced in the visible reconcile path (requestDuration
	// uses time.Now directly) — confirm it is still needed.
	clock clock.PassiveClock
}

// Compile-time assertion that Reconciler satisfies the generated reconciler interface.
var _ rrreconciler.Interface = (*Reconciler)(nil)
// ReconcileKind processes updates to ResolutionRequests, sets status
// fields on it, and returns any errors experienced along the way.
func (r *Reconciler) ReconcileKind(ctx context.Context, rr *v1beta1.ResolutionRequest) reconciler.Event {
	// Nothing to do for a missing or already-completed request.
	if rr == nil || rr.IsDone() {
		return nil
	}
	if rr.Status.GetCondition(apis.ConditionSucceeded) == nil {
		rr.Status.InitializeConditions()
	}
	timeout := config.FromContextOrDefaults(ctx).Defaults.DefaultMaximumResolutionTimeout
	if rr.Status.Data != "" {
		// A resolver has populated the data: the request is complete.
		rr.Status.MarkSucceeded()
		return nil
	}
	if requestDuration(rr) > timeout {
		rr.Status.MarkFailed(resolutioncommon.ReasonResolutionTimedOut, timeoutMessage(timeout))
		return nil
	}
	// Still waiting on a resolver; check back when the timeout would expire.
	rr.Status.MarkInProgress(resolutioncommon.MessageWaitingForResolver)
	return controller.NewRequeueAfter(timeout - requestDuration(rr))
}
// requestDuration returns the amount of time that has passed since a
// given ResolutionRequest was created.
func requestDuration(rr *v1beta1.ResolutionRequest) time.Duration {
	created := rr.ObjectMeta.CreationTimestamp.DeepCopy().Time.UTC()
	return time.Since(created)
}
func timeoutMessage(timeout time.Duration) string {
return fmt.Sprintf("resolution took longer than global timeout of %s", timeout)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"time"
"knative.dev/pkg/kmeta"
)
const (
	// minimumResourceAge is the age at which resources stop being IsYoungResource.
	minimumResourceAge = 5 * time.Second
)

// IsYoungResource reports whether the resource was created less than
// minimumResourceAge ago, based on its creation timestamp.
func IsYoungResource(obj kmeta.Accessor) bool {
	age := time.Since(obj.GetCreationTimestamp().Time)
	return age < minimumResourceAge
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskrun
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client"
taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun"
verificationpolicyinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy"
taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun"
resolutionclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
resolutioninformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
"github.com/tektoncd/pipeline/pkg/pod"
cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/spire"
"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
"github.com/tektoncd/pipeline/pkg/tracing"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
kubeclient "knative.dev/pkg/client/injection/kube/client"
limitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange"
filteredpodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered"
secretinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/secret"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
)
const (
	// TracerProviderName is the name of TraceProvider constructed for the
	// TaskRun reconciler (passed to tracing.New in NewController).
	TracerProviderName = "taskrun-reconciler"
)
// taskRunFilterManagedBy reports whether this controller should handle the
// given object. Non-TaskRun objects pass through; TaskRuns whose ManagedBy
// field names a different controller are filtered out.
var taskRunFilterManagedBy = func(obj interface{}) bool {
	tr, isTaskRun := obj.(*v1.TaskRun)
	if !isTaskRun {
		return true
	}
	managedBy := tr.Spec.ManagedBy
	return managedBy == nil || *managedBy == pipeline.ManagedBy
}
// NewController instantiates a new controller.Impl from knative.dev/pkg/controller
func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(context.Context, configmap.Watcher) *controller.Impl {
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		logger := logging.FromContext(ctx)
		kubeclientset := kubeclient.Get(ctx)
		pipelineclientset := pipelineclient.Get(ctx)
		taskRunInformer := taskruninformer.Get(ctx)
		podInformer := filteredpodinformer.Get(ctx, v1.ManagedByLabelKey)
		limitrangeInformer := limitrangeinformer.Get(ctx)
		verificationpolicyInformer := verificationpolicyinformer.Get(ctx)
		resolutionInformer := resolutioninformer.Get(ctx)
		secretinformer := secretinformer.Get(ctx)
		spireClient := spire.GetControllerAPIClient(ctx)
		tracerProvider := tracing.New(TracerProviderName, logger.Named("tracing"))
		taskrunmetricsRecorder := taskrunmetrics.Get(ctx)
		//nolint:contextcheck // OnStore methods does not support context as a parameter
		configStore := config.NewStore(logger.Named("config-store"),
			taskrunmetrics.OnStore(logger, taskrunmetricsRecorder),
			spire.OnStore(ctx, logger),
			tracerProvider.OnStore(secretinformer.Lister()),
		)
		configStore.WatchConfigs(cmw)
		entrypointCache, err := pod.NewEntrypointCache(kubeclientset)
		if err != nil {
			logger.Fatalf("Error creating entrypoint cache: %v", err)
		}
		c := &Reconciler{
			KubeClientSet:            kubeclientset,
			PipelineClientSet:        pipelineclientset,
			Images:                   opts.Images,
			Clock:                    clock,
			spireClient:              spireClient,
			taskRunLister:            taskRunInformer.Lister(),
			limitrangeLister:         limitrangeInformer.Lister(),
			verificationPolicyLister: verificationpolicyInformer.Lister(),
			cloudEventClient:         cloudeventclient.Get(ctx),
			metrics:                  taskrunmetricsRecorder,
			entrypointCache:          entrypointCache,
			podLister:                podInformer.Lister(),
			pvcHandler:               volumeclaim.NewPVCHandler(kubeclientset, logger),
			resolutionRequester:      resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()),
			tracerProvider:           tracerProvider,
		}
		impl := taskrunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {
			return controller.Options{
				AgentName:         pipeline.TaskRunControllerName,
				ConfigStore:       configStore,
				PromoteFilterFunc: taskRunFilterManagedBy,
			}
		})
		// Note: the handler-registration panics below use %v rather than %w —
		// %w is only interpreted by fmt.Errorf; Panicf would print "%!w(...)".
		if _, err := secretinformer.Informer().AddEventHandler(controller.HandleAll(tracerProvider.Handler)); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register Secret informer event handler: %v", err)
		}
		// Only enqueue TaskRuns this controller manages.
		if _, err := taskRunInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: taskRunFilterManagedBy,
			Handler:    controller.HandleAll(impl.Enqueue),
		}); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register TaskRun informer event handler: %v", err)
		}
		// Enqueue the owning TaskRun when one of its Pods changes.
		if _, err := podInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: controller.FilterController(&v1.TaskRun{}),
			Handler:    controller.HandleAll(impl.EnqueueControllerOf),
		}); err != nil {
			logging.FromContext(ctx).Panicf("Couldn't register Pod informer event handler: %v", err)
		}
		return impl
	}
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"context"
"errors"
"fmt"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
podtpl "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/container"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
"github.com/tektoncd/pipeline/pkg/pod"
"github.com/tektoncd/pipeline/pkg/substitution"
"github.com/tektoncd/pipeline/pkg/workspace"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
	// objectIndividualVariablePattern is the reference pattern for a single key of an
	// object param, i.e. params.<object_param_name>.<key_name>.
	objectIndividualVariablePattern = "params.%s.%s"
)
var (
	// paramPatterns are the fmt patterns producing every accepted spelling of a param
	// reference for a given name: dot, bracket-double-quoted and bracket-single-quoted.
	paramPatterns = []string{
		"params.%s",
		"params[%q]",
		"params['%s']",
		// FIXME(vdemeester) Remove that with deprecating v1beta1
		"inputs.params.%s",
	}
	// substitutionToParamNamePatterns are regexes that recover the param name from a
	// replacement key produced with paramPatterns (best-effort inverse mapping; the
	// param name is always capture group 1).
	substitutionToParamNamePatterns = []string{
		`^params\.(\w+)$`,
		`^params\["([^"]+)"\]$`,
		`^params\['([^']+)'\]$`,
		// FIXME(vdemeester) Remove that with deprecating v1beta1
		`^inputs\.params\.(\w+)$`,
	}
	// paramIndexRegexPatterns match indexed array-param usages such as
	// $(params.foo[2]) or $(params.foo[*]); capture group 1 holds the numeric index
	// (empty for the star form).
	paramIndexRegexPatterns = []string{
		`\$\(params.%s\[([0-9]*)*\*?\]\)`,
		`\$\(params\[%q\]\[([0-9]*)*\*?\]\)`,
		`\$\(params\['%s'\]\[([0-9]*)*\*?\]\)`,
	}
)
// applyStepActionParameters applies the params from the task and the underlying step to the referenced stepaction.
// substitution order:
//  1. taskrun parameter values in step parameters
//  2. step-provided parameter values
//  3. default values that reference other parameters
//  4. simple default values
//  5. step result references
//
// It returns an error when a default references a parameter that does not exist,
// when defaults form a circular reference chain, or when the same replacement key
// is produced with conflicting types (string vs array).
func applyStepActionParameters(step *v1.Step, spec *v1.TaskSpec, tr *v1.TaskRun, stepParams v1.Params, defaults []v1.ParamSpec) (*v1.Step, error) {
	// 1. Resolve taskrun-level parameter variables inside the step's own params first,
	// so the values handed down to the StepAction are concrete.
	if stepParams != nil {
		stringR, arrayR, objectR := getTaskParameters(spec, tr, spec.Params...)
		stepParams = stepParams.ReplaceVariables(stringR, arrayR, objectR)
	}
	// 2. Index the step-provided parameter values by name for the lookups below.
	stepProvidedParams := make(map[string]v1.ParamValue)
	for _, sp := range stepParams {
		stepProvidedParams[sp.Name] = sp.Value
	}
	// 3,4. get replacements from default params (both referenced and simple)
	stringReplacements, arrayReplacements, objectReplacements := replacementsFromDefaultParams(defaults)
	// process parameter values in order of substitution (2,3,4)
	processedParams := make([]v1.Param, 0, len(defaults))
	// keep track of parameters that need resolution and their references
	paramsNeedingResolution := make(map[string]bool)
	paramReferenceMap := make(map[string][]string) // maps param name to names of params it references
	// First pass: settle every parameter that needs no resolution (step-provided
	// values and reference-free defaults) and record references for the rest.
	for _, p := range defaults {
		// 2. A step-provided value always wins over the default.
		if value, exists := stepProvidedParams[p.Name]; exists {
			// parameter provided by step, add it to processed
			processedParams = append(processedParams, v1.Param{
				Name:  p.Name,
				Value: value,
			})
			continue
		}
		// 3. default params
		if p.Default != nil {
			if !strings.Contains(p.Default.StringVal, "$(params.") {
				// parameter has no references, add it to processed
				processedParams = append(processedParams, v1.Param{
					Name:  p.Name,
					Value: *p.Default,
				})
				continue
			}
			// The default references other parameters; record which ones so the
			// resolution loop below can order the work.
			paramsNeedingResolution[p.Name] = true
			matches, _ := substitution.ExtractVariableExpressions(p.Default.StringVal, "params")
			referencedParams := make([]string, 0, len(matches))
			for _, match := range matches {
				// Strip "$(params." and ")" to recover the bare referenced name.
				paramName := strings.TrimSuffix(strings.TrimPrefix(match, "$(params."), ")")
				referencedParams = append(referencedParams, paramName)
			}
			paramReferenceMap[p.Name] = referencedParams
		}
	}
	// Iteratively resolve referencing parameters; each round must make progress,
	// otherwise a reference is missing or circular.
	for len(paramsNeedingResolution) > 0 {
		paramWasResolved := false
		// track unresolved params and their references
		unresolvedParams := make(map[string][]string)
		for paramName := range paramsNeedingResolution {
			canResolveParam := true
			for _, referencedParam := range paramReferenceMap[paramName] {
				// Check if referenced parameter is processed
				isReferenceResolved := false
				for _, pp := range processedParams {
					if pp.Name == referencedParam {
						isReferenceResolved = true
						break
					}
				}
				if !isReferenceResolved {
					canResolveParam = false
					unresolvedParams[paramName] = append(unresolvedParams[paramName], referencedParam)
					break
				}
			}
			if canResolveParam {
				// process this parameter as all its references have been resolved
				for _, p := range defaults {
					if p.Name == paramName {
						defaultValue := *p.Default
						resolvedValue := defaultValue.StringVal
						// Substitute each reference with its already-resolved value.
						for _, referencedParam := range paramReferenceMap[paramName] {
							for _, pp := range processedParams {
								if pp.Name == referencedParam {
									resolvedValue = strings.ReplaceAll(
										resolvedValue,
										fmt.Sprintf("$(params.%s)", referencedParam),
										pp.Value.StringVal,
									)
									break
								}
							}
						}
						defaultValue.StringVal = resolvedValue
						processedParams = append(processedParams, v1.Param{
							Name:  paramName,
							Value: defaultValue,
						})
						delete(paramsNeedingResolution, paramName)
						paramWasResolved = true
						break
					}
				}
			}
		}
		// No progress this round: either a reference points at a parameter that
		// does not exist, or the remaining parameters form a cycle.
		if !paramWasResolved {
			// check parameter references to a non-existent parameter
			for param, unresolvedRefs := range unresolvedParams {
				// check referenced parameters in defaults
				for _, ref := range unresolvedRefs {
					exists := false
					for _, p := range defaults {
						if p.Name == ref {
							exists = true
							break
						}
					}
					if !exists {
						return nil, fmt.Errorf("parameter %q references non-existent parameter %q", param, ref)
					}
				}
				// parameters exist but can't be resolved hence it's a circular dependency
				return nil, errors.New("circular dependency detected in parameter references")
			}
		}
	}
	// apply the processed parameters and merge all replacements (2,3,4)
	procStringReplacements, procArrayReplacements, procObjectReplacements := replacementsFromParams(processedParams)
	// merge replacements from defaults and processed params
	for k, v := range procStringReplacements {
		stringReplacements[k] = v
	}
	for k, v := range procArrayReplacements {
		arrayReplacements[k] = v
	}
	for k, v := range procObjectReplacements {
		if objectReplacements[k] == nil {
			objectReplacements[k] = v
		} else {
			for key, val := range v {
				objectReplacements[k][key] = val
			}
		}
	}
	// 5. set step result replacements last
	if stepResultReplacements, err := replacementsFromStepResults(step, stepParams, defaults); err != nil {
		return nil, err
	} else {
		// merge step result replacements into string replacements last
		for k, v := range stepResultReplacements {
			stringReplacements[k] = v
		}
	}
	// check if there are duplicate keys in the replacements
	// if the same key is present in both stringReplacements and arrayReplacements, it means
	// that the default value and the passed value have different types.
	if err := checkForDuplicateKeys(stringReplacements, arrayReplacements); err != nil {
		return nil, err
	}
	container.ApplyStepReplacements(step, stringReplacements, arrayReplacements)
	return step, nil
}
// checkForDuplicateKeys returns an error if any replacement key appears in both the
// string and the array replacement maps, which indicates that a param's default
// value and its passed value have different types. Keys are scanned in sorted order
// so the reported key is deterministic.
func checkForDuplicateKeys(stringReplacements map[string]string, arrayReplacements map[string][]string) error {
	sortedKeys := make([]string, 0, len(stringReplacements))
	for key := range stringReplacements {
		sortedKeys = append(sortedKeys, key)
	}
	sort.Strings(sortedKeys)
	for _, key := range sortedKeys {
		if _, clash := arrayReplacements[key]; !clash {
			continue
		}
		paramName := paramNameFromReplacementKey(key)
		return fmt.Errorf("invalid parameter substitution: %s. Please check the types of the default value and the passed value", paramName)
	}
	return nil
}
// substitutionToParamNameRegexps holds the compiled forms of
// substitutionToParamNamePatterns. Compiling once at package init avoids
// re-running regexp.MustCompile for every pattern on every lookup.
var substitutionToParamNameRegexps = func() []*regexp.Regexp {
	compiled := make([]*regexp.Regexp, len(substitutionToParamNamePatterns))
	for i, pattern := range substitutionToParamNamePatterns {
		compiled[i] = regexp.MustCompile(pattern)
	}
	return compiled
}()

// paramNameFromReplacementKey returns the param name from the replacement key in best effort
func paramNameFromReplacementKey(key string) string {
	for _, re := range substitutionToParamNameRegexps {
		if matches := re.FindStringSubmatch(key); matches != nil {
			// Capture group 1 is the param name in every pattern.
			return matches[1]
		}
	}
	// If no pattern matches, fall back to returning the key unchanged.
	return key
}
// findArrayIndexParamUsage scans s for indexed usages of the array param paramName
// (e.g. $(params.foo[2])) and, for each usage with an explicit numeric index,
// records a replacement mapping it onto the equivalent step-result reference
// $(steps.<stepName>.results.<resultName>[<idx>]). The (possibly updated)
// stringReplacements map is returned.
func findArrayIndexParamUsage(s string, paramName string, stepName string, resultName string, stringReplacements map[string]string) map[string]string {
	for _, pattern := range paramIndexRegexPatterns {
		re := regexp.MustCompile(fmt.Sprintf(pattern, paramName))
		for _, match := range re.FindAllStringSubmatch(s, -1) {
			if len(match) != 2 {
				continue
			}
			idx := match[1]
			// The star form captures an empty index and is handled elsewhere.
			if idx == "" {
				continue
			}
			key := strings.TrimSuffix(strings.TrimPrefix(match[0], "$("), ")")
			stringReplacements[key] = fmt.Sprintf("$(steps.%s.results.%s[%s])", stepName, resultName, idx)
		}
	}
	return stringReplacements
}
// replacementsArrayIdxStepResults collects replacements for indexed Step Result
// array usages (e.g. $(params.p[1])) found in the Step's command, args and env
// values.
// NOTE(review): the Step's Script is not scanned here — confirm whether that is
// intentional.
func replacementsArrayIdxStepResults(step *v1.Step, paramName string, stepName string, resultName string) map[string]string {
	replacements := map[string]string{}
	// Gather all scanned string fields in the same order they were checked before:
	// command, then args, then env values.
	fields := append(append([]string{}, step.Command...), step.Args...)
	for _, envVar := range step.Env {
		fields = append(fields, envVar.Value)
	}
	for _, field := range fields {
		replacements = findArrayIndexParamUsage(field, paramName, stepName, resultName, replacements)
	}
	return replacements
}
// replacementsFromStepResults generates string replacements for params whose values is a variable substitution of a step result.
// For example, when a step param "p1" is passed "$(steps.step1.results.foo)", every
// accepted spelling of $(params.p1) is mapped onto the corresponding step-result
// reference so the StepAction sees the result directly.
// Returns an error when the step-result expression cannot be parsed.
func replacementsFromStepResults(step *v1.Step, stepParams v1.Params, defaults []v1.ParamSpec) (map[string]string, error) {
	stringReplacements := map[string]string{}
	for _, sp := range stepParams {
		if sp.Value.StringVal != "" && strings.HasPrefix(sp.Value.StringVal, "$(steps.") {
			// eg: when parameter p1 references a step result, replace:
			// $(params.p1) with $(steps.step1.results.foo)
			value := strings.TrimSuffix(strings.TrimPrefix(sp.Value.StringVal, "$("), ")")
			pr, err := resultref.ParseStepExpression(value)
			if err != nil {
				return nil, err
			}
			// The declared type of the matching default decides which replacement
			// keys have to be generated.
			for _, d := range defaults {
				if d.Name == sp.Name {
					switch d.Type {
					case v1.ParamTypeObject:
						// Object params: map each declared key individually, e.g.
						// params.p1.key -> $(steps.step1.results.foo.key)
						for k := range d.Properties {
							stringReplacements[fmt.Sprintf("params.%s.%s", d.Name, k)] = fmt.Sprintf("$(steps.%s.results.%s.%s)", pr.ResourceName, pr.ResultName, k)
						}
					case v1.ParamTypeArray:
						// for array parameters
						// with star notation, replace:
						// $(params.p1[*]) with $(steps.step1.results.foo[*])
						for _, pattern := range paramPatterns {
							stringReplacements[fmt.Sprintf(pattern+"[*]", d.Name)] = fmt.Sprintf("$(steps.%s.results.%s[*])", pr.ResourceName, pr.ResultName)
						}
						// with index notation, replace:
						// $(params.p1[idx]) with $(steps.step1.results.foo[idx])
						for k, v := range replacementsArrayIdxStepResults(step, d.Name, pr.ResourceName, pr.ResultName) {
							stringReplacements[k] = v
						}
					case v1.ParamTypeString:
						fallthrough
					default:
						// for string parameters and default case,
						// replace any reference to the parameter with the step result reference
						// since both use simple value substitution
						// eg: replace $(params.p1) with $(steps.step1.results.foo)
						for _, pattern := range paramPatterns {
							stringReplacements[fmt.Sprintf(pattern, d.Name)] = sp.Value.StringVal
						}
					}
				}
			}
		}
	}
	return stringReplacements, nil
}
// getTaskParameters gets the string, array and object parameter variable replacements needed in the Task.
// TaskRun-provided values overwrite defaults; object params are merged per key so
// TaskRun keys override default keys without dropping unrelated default keys.
// This assumes that the TaskRun inputs have been validated against what the Task requests.
// Note: spec is currently unused by this function (callers may pass nil).
func getTaskParameters(spec *v1.TaskSpec, tr *v1.TaskRun, defaults ...v1.ParamSpec) (map[string]string, map[string][]string, map[string]map[string]string) {
	// Set params from Task defaults
	stringReplacements, arrayReplacements, objectReplacements := replacementsFromDefaultParams(defaults)
	// Set and overwrite params with the ones from the TaskRun
	trStrings, trArrays, trObjects := replacementsFromParams(tr.Spec.Params)
	for k, v := range trStrings {
		stringReplacements[k] = v
	}
	for k, v := range trArrays {
		arrayReplacements[k] = v
	}
	for k, v := range trObjects {
		if len(v) == 0 {
			continue
		}
		// Allocate a fresh map when absent rather than aliasing trObjects' inner map,
		// then merge per key. (replacementsFromDefaultParams always returns non-nil
		// maps, so no nil check on objectReplacements itself is needed.)
		if objectReplacements[k] == nil {
			objectReplacements[k] = make(map[string]string, len(v))
		}
		for key, val := range v {
			objectReplacements[k][key] = val
		}
	}
	return stringReplacements, arrayReplacements, objectReplacements
}
// ApplyParameters applies the params from a TaskRun.Parameters to a TaskSpec.
// It returns a new TaskSpec with every param reference replaced; defaults fill in
// any params the TaskRun did not provide.
func ApplyParameters(spec *v1.TaskSpec, tr *v1.TaskRun, defaults ...v1.ParamSpec) *v1.TaskSpec {
	stringRepl, arrayRepl, objectRepl := getTaskParameters(spec, tr, defaults...)
	return ApplyReplacements(spec, stringRepl, arrayRepl, objectRepl)
}
// replacementsFromDefaultParams expands the default values of the given ParamSpecs
// into string/array/object replacement maps. Defaults whose StringVal references
// other params ("$(params.x)") are resolved in a second pass against the
// reference-free defaults collected in the first pass.
func replacementsFromDefaultParams(defaults v1.ParamSpecs) (map[string]string, map[string][]string, map[string]map[string]string) {
	stringReplacements := map[string]string{}
	arrayReplacements := map[string][]string{}
	objectReplacements := map[string]map[string]string{}
	// First pass: collect all non-reference default values
	for _, p := range defaults {
		if p.Default != nil && !strings.Contains(p.Default.StringVal, "$(params.") {
			switch p.Default.Type {
			case v1.ParamTypeArray:
				for _, pattern := range paramPatterns {
					// Indexed element access, e.g. params.foo[0].
					for i := range len(p.Default.ArrayVal) {
						stringReplacements[fmt.Sprintf(pattern+"[%d]", p.Name, i)] = p.Default.ArrayVal[i]
					}
					arrayReplacements[fmt.Sprintf(pattern, p.Name)] = p.Default.ArrayVal
				}
			case v1.ParamTypeObject:
				for _, pattern := range paramPatterns {
					objectReplacements[fmt.Sprintf(pattern, p.Name)] = p.Default.ObjectVal
				}
				// Individual key access, e.g. params.foo.key.
				for k, v := range p.Default.ObjectVal {
					stringReplacements[fmt.Sprintf(objectIndividualVariablePattern, p.Name, k)] = v
				}
			case v1.ParamTypeString:
				fallthrough
			default:
				for _, pattern := range paramPatterns {
					stringReplacements[fmt.Sprintf(pattern, p.Name)] = p.Default.StringVal
				}
			}
		}
	}
	// Second pass: handle parameter references in default values
	for _, p := range defaults {
		if p.Default != nil && strings.Contains(p.Default.StringVal, "$(params.") {
			// extract referenced parameter name
			matches, _ := substitution.ExtractVariableExpressions(p.Default.StringVal, "params")
			for _, match := range matches {
				paramName := strings.TrimPrefix(match, "$(params.")
				paramName = strings.TrimSuffix(paramName, ")")
				// find referenced parameter value
				for _, pattern := range paramPatterns {
					key := fmt.Sprintf(pattern, paramName)
					if value, exists := stringReplacements[key]; exists {
						// Apply the value to this parameter's default.
						// NOTE(review): resolvedValue always restarts from the original
						// StringVal, so when a default references several params only the
						// last match's substitution survives here — confirm whether
						// multi-reference defaults are meant to be handled by
						// applyStepActionParameters' resolution loop instead.
						resolvedValue := strings.ReplaceAll(p.Default.StringVal, match, value)
						for _, pattern := range paramPatterns {
							stringReplacements[fmt.Sprintf(pattern, p.Name)] = resolvedValue
						}
						break
					}
				}
			}
		}
	}
	return stringReplacements, arrayReplacements, objectReplacements
}
// replacementsFromParams expands a list of params into the three replacement maps
// used for variable substitution: plain strings, arrays, and objects (whose
// individual keys also get string replacements of the form params.<name>.<key>).
func replacementsFromParams(params v1.Params) (map[string]string, map[string][]string, map[string]map[string]string) {
	stringReplacements := map[string]string{}
	arrayReplacements := map[string][]string{}
	objectReplacements := map[string]map[string]string{}
	for _, param := range params {
		switch param.Value.Type {
		case v1.ParamTypeArray:
			for _, pattern := range paramPatterns {
				// Indexed access to individual elements, e.g. params.foo[0].
				for idx, element := range param.Value.ArrayVal {
					stringReplacements[fmt.Sprintf(pattern+"[%d]", param.Name, idx)] = element
				}
				arrayReplacements[fmt.Sprintf(pattern, param.Name)] = param.Value.ArrayVal
			}
		case v1.ParamTypeObject:
			for _, pattern := range paramPatterns {
				objectReplacements[fmt.Sprintf(pattern, param.Name)] = param.Value.ObjectVal
			}
			// Individual key access, e.g. params.foo.key.
			for key, val := range param.Value.ObjectVal {
				stringReplacements[fmt.Sprintf(objectIndividualVariablePattern, param.Name, key)] = val
			}
		case v1.ParamTypeString:
			fallthrough
		default:
			for _, pattern := range paramPatterns {
				stringReplacements[fmt.Sprintf(pattern, param.Name)] = param.Value.StringVal
			}
		}
	}
	return stringReplacements, arrayReplacements, objectReplacements
}
// getContextReplacements returns the replacement values for the
// $(context.task.*) and $(context.taskRun.*) variables for the given task name
// and TaskRun.
func getContextReplacements(taskName string, tr *v1.TaskRun) map[string]string {
	replacements := map[string]string{
		"context.task.name":         taskName,
		"context.taskRun.name":      tr.Name,
		"context.taskRun.namespace": tr.Namespace,
		"context.taskRun.uid":       string(tr.ObjectMeta.UID),
	}
	// The retry count is the number of previously recorded retry statuses.
	replacements["context.task.retry-count"] = strconv.Itoa(len(tr.Status.RetriesStatus))
	return replacements
}
// ApplyContexts applies the substitution from $(context.(taskRun|task).*) with the specified values.
// Uses "" as a default if a value is not available.
func ApplyContexts(spec *v1.TaskSpec, taskName string, tr *v1.TaskRun) *v1.TaskSpec {
	contextReplacements := getContextReplacements(taskName, tr)
	return ApplyReplacements(spec, contextReplacements, map[string][]string{}, map[string]map[string]string{})
}
// ApplyWorkspaces applies the substitution from paths that the workspaces in declarations mounted to, the
// volumes that bindings are realized with in the task spec and the PersistentVolumeClaim names for the
// workspaces.
func ApplyWorkspaces(ctx context.Context, spec *v1.TaskSpec, declarations []v1.WorkspaceDeclaration, bindings []v1.WorkspaceBinding, vols map[string]corev1.Volume) *v1.TaskSpec {
	replacements := map[string]string{}
	bound := sets.NewString()
	for _, b := range bindings {
		bound.Insert(b.Name)
	}
	// workspaces.<name>.bound / workspaces.<name>.path: optional unbound workspaces
	// get "false" and an empty path; everything else is bound and gets its mount path.
	for _, decl := range declarations {
		prefix := fmt.Sprintf("workspaces.%s.", decl.Name)
		if decl.Optional && !bound.Has(decl.Name) {
			replacements[prefix+"bound"] = "false"
			replacements[prefix+"path"] = ""
			continue
		}
		replacements[prefix+"bound"] = "true"
		spec = applyWorkspaceMountPath(prefix+"path", spec, decl)
	}
	// workspaces.<name>.volume: the name of the volume realizing the workspace.
	for name, vol := range vols {
		replacements[fmt.Sprintf("workspaces.%s.volume", name)] = vol.Name
	}
	// workspaces.<name>.claim: the PVC name, or "" for non-PVC bindings.
	for _, b := range bindings {
		claim := ""
		if b.PersistentVolumeClaim != nil {
			claim = b.PersistentVolumeClaim.ClaimName
		}
		replacements[fmt.Sprintf("workspaces.%s.claim", b.Name)] = claim
	}
	return ApplyReplacements(spec, replacements, map[string][]string{}, map[string]map[string]string{})
}
// ApplyParametersToWorkspaceBindings applies parameters to the WorkspaceBindings of a TaskRun. It takes a TaskSpec and a TaskRun as input and returns the modified TaskRun.
func ApplyParametersToWorkspaceBindings(ts *v1.TaskSpec, tr *v1.TaskRun) *v1.TaskRun {
	// Work on a copy of the spec so the caller's TaskSpec is left untouched.
	specCopy := ts.DeepCopy()
	stringReplacements, _, _ := getTaskParameters(specCopy, tr, specCopy.Params...)
	tr.Spec.Workspaces = workspace.ReplaceWorkspaceBindingsVars(tr.Spec.Workspaces, stringReplacements)
	return tr
}
// applyWorkspaceMountPath accepts a workspace path variable of the form $(workspaces.foo.path) and replaces
// it in the fields of the TaskSpec. A new updated TaskSpec is returned. Steps or Sidecars in the TaskSpec
// that override the mountPath will receive that mountPath in place of the variable's value. Other Steps and
// Sidecars will see either the workspace's declared mountPath or the default of /workspaces/<name>.
func applyWorkspaceMountPath(variable string, spec *v1.TaskSpec, declaration v1.WorkspaceDeclaration) *v1.TaskSpec {
	replacements := map[string]string{variable: ""}
	noArrays := map[string][]string{}
	// Steps that override the mountPath get their own override substituted in.
	for i := range spec.Steps {
		step := &spec.Steps[i]
		for _, usage := range step.Workspaces {
			if usage.Name != declaration.Name || usage.MountPath == "" {
				continue
			}
			replacements[variable] = usage.MountPath
			container.ApplyStepReplacements(step, replacements, noArrays)
		}
	}
	// Likewise for Sidecars that override the mountPath.
	for i := range spec.Sidecars {
		sidecar := &spec.Sidecars[i]
		for _, usage := range sidecar.Workspaces {
			if usage.Name != declaration.Name || usage.MountPath == "" {
				continue
			}
			replacements[variable] = usage.MountPath
			container.ApplySidecarReplacements(sidecar, replacements, noArrays)
		}
	}
	// Every remaining occurrence falls back to the declared (or default) mount path.
	replacements[variable] = declaration.GetMountPath()
	return ApplyReplacements(spec, replacements, noArrays, map[string]map[string]string{})
}
// ApplyResults applies the substitution from values in results and step results which are referenced in spec as subitems
// of the replacementStr.
func ApplyResults(spec *v1.TaskSpec) *v1.TaskSpec {
	// First substitute each step's own step-result paths into that step.
	for i := range spec.Steps {
		stepReplacements := getStepResultReplacements(spec.Steps[i], i)
		container.ApplyStepReplacements(&spec.Steps[i], stepReplacements, map[string][]string{})
	}
	// Then substitute task-level result paths across the whole spec.
	taskReplacements := getTaskResultReplacements(spec)
	return ApplyReplacements(spec, taskReplacements, map[string][]string{}, map[string]map[string]string{})
}
// getStepResultReplacements creates all combinations of string replacements from Step Results.
// Every accepted spelling of step.results.<name>.path is mapped to the result file
// location under the step's directory.
func getStepResultReplacements(step v1.Step, idx int) map[string]string {
	replacements := map[string]string{}
	stepName := pod.StepName(step.Name, idx)
	for _, result := range step.Results {
		resultPath := filepath.Join(pipeline.StepsDir, stepName, "results", result.Name)
		for _, pattern := range []string{
			"step.results.%s.path",
			"step.results[%q].path",
			"step.results['%s'].path",
		} {
			replacements[fmt.Sprintf(pattern, result.Name)] = resultPath
		}
	}
	return replacements
}
// getTaskResultReplacements creates all combinations of string replacements from TaskResults.
// Every accepted spelling of results.<name>.path is mapped to the default result
// file location for that result.
func getTaskResultReplacements(spec *v1.TaskSpec) map[string]string {
	replacements := map[string]string{}
	for _, result := range spec.Results {
		resultPath := filepath.Join(pipeline.DefaultResultPath, result.Name)
		for _, pattern := range []string{
			"results.%s.path",
			"results[%q].path",
			"results['%s'].path",
		} {
			replacements[fmt.Sprintf(pattern, result.Name)] = resultPath
		}
	}
	return replacements
}
// ApplyArtifacts replaces the occurrences of artifacts.path and step.artifacts.path with the absolute tekton internal path
func ApplyArtifacts(spec *v1.TaskSpec) *v1.TaskSpec {
	// Artifact paths are per step: resolve and apply them one step at a time.
	for i := range spec.Steps {
		replacements := getArtifactReplacements(spec.Steps[i], i)
		container.ApplyStepReplacements(&spec.Steps[i], replacements, map[string][]string{})
	}
	return spec
}
// getArtifactReplacements maps the step- and task-level artifact path patterns to
// the provenance.json locations for the given step.
func getArtifactReplacements(step v1.Step, idx int) map[string]string {
	stepName := pod.StepName(step.Name, idx)
	return map[string]string{
		artifactref.StepArtifactPathPattern: filepath.Join(pipeline.StepsDir, stepName, "artifacts", "provenance.json"),
		artifactref.TaskArtifactPathPattern: filepath.Join(pipeline.ArtifactsDir, "provenance.json"),
	}
}
// ApplyStepExitCodePath replaces the occurrences of exitCode path with the absolute tekton internal path
// Replace $(steps.<step-name>.exitCode.path) with pipeline.StepsDir/<step-name>/exitCode
func ApplyStepExitCodePath(spec *v1.TaskSpec) *v1.TaskSpec {
	stringReplacements := map[string]string{}
	for i, step := range spec.Steps {
		// Resolve the pod-level step name once per step instead of twice.
		stepName := pod.StepName(step.Name, i)
		stringReplacements[fmt.Sprintf("steps.%s.exitCode.path", stepName)] = filepath.Join(pipeline.StepsDir, stepName, "exitCode")
	}
	return ApplyReplacements(spec, stringReplacements, map[string][]string{}, map[string]map[string]string{})
}
// ApplyCredentialsPath applies a substitution of the key $(credentials.path) with the path that credentials
// from annotated secrets are written to.
func ApplyCredentialsPath(spec *v1.TaskSpec, path string) *v1.TaskSpec {
	return ApplyReplacements(spec, map[string]string{"credentials.path": path}, map[string][]string{}, map[string]map[string]string{})
}
// ApplyReplacements replaces placeholders for declared parameters with the specified replacements.
// It operates on a deep copy, so the caller's TaskSpec is never mutated.
func ApplyReplacements(spec *v1.TaskSpec, stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) *v1.TaskSpec {
	spec = spec.DeepCopy()
	// Apply variable expansion to steps fields.
	steps := spec.Steps
	for i := range steps {
		if steps[i].Params != nil {
			steps[i].Params = steps[i].Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
		}
		container.ApplyStepReplacements(&steps[i], stringReplacements, arrayReplacements)
	}
	// Apply variable expansion to stepTemplate fields.
	if spec.StepTemplate != nil {
		container.ApplyStepTemplateReplacements(spec.StepTemplate, stringReplacements, arrayReplacements)
	}
	// Apply variable expansion to the build's volumes: names, plus the string fields
	// of each supported volume source (ConfigMap, Secret, PVC, Projected, CSI).
	for i, v := range spec.Volumes {
		spec.Volumes[i].Name = substitution.ApplyReplacements(v.Name, stringReplacements)
		if v.VolumeSource.ConfigMap != nil {
			spec.Volumes[i].ConfigMap.Name = substitution.ApplyReplacements(v.ConfigMap.Name, stringReplacements)
			for index, item := range v.ConfigMap.Items {
				spec.Volumes[i].ConfigMap.Items[index].Key = substitution.ApplyReplacements(item.Key, stringReplacements)
				spec.Volumes[i].ConfigMap.Items[index].Path = substitution.ApplyReplacements(item.Path, stringReplacements)
			}
		}
		if v.VolumeSource.Secret != nil {
			spec.Volumes[i].Secret.SecretName = substitution.ApplyReplacements(v.Secret.SecretName, stringReplacements)
			for index, item := range v.Secret.Items {
				spec.Volumes[i].Secret.Items[index].Key = substitution.ApplyReplacements(item.Key, stringReplacements)
				spec.Volumes[i].Secret.Items[index].Path = substitution.ApplyReplacements(item.Path, stringReplacements)
			}
		}
		if v.PersistentVolumeClaim != nil {
			spec.Volumes[i].PersistentVolumeClaim.ClaimName = substitution.ApplyReplacements(v.PersistentVolumeClaim.ClaimName, stringReplacements)
		}
		if v.Projected != nil {
			// s is a copy, but its ConfigMap/Secret/ServiceAccountToken fields are
			// pointers, so the writes below reach the projected sources in spec.
			for _, s := range spec.Volumes[i].Projected.Sources {
				if s.ConfigMap != nil {
					s.ConfigMap.Name = substitution.ApplyReplacements(s.ConfigMap.Name, stringReplacements)
				}
				if s.Secret != nil {
					s.Secret.Name = substitution.ApplyReplacements(s.Secret.Name, stringReplacements)
				}
				if s.ServiceAccountToken != nil {
					s.ServiceAccountToken.Audience = substitution.ApplyReplacements(s.ServiceAccountToken.Audience, stringReplacements)
				}
			}
		}
		if v.CSI != nil {
			if v.CSI.NodePublishSecretRef != nil {
				spec.Volumes[i].CSI.NodePublishSecretRef.Name = substitution.ApplyReplacements(v.CSI.NodePublishSecretRef.Name, stringReplacements)
			}
			if v.CSI.VolumeAttributes != nil {
				for key, value := range v.CSI.VolumeAttributes {
					spec.Volumes[i].CSI.VolumeAttributes[key] = substitution.ApplyReplacements(value, stringReplacements)
				}
			}
		}
	}
	// Apply variable expansion to workspace mount paths.
	for i, v := range spec.Workspaces {
		spec.Workspaces[i].MountPath = substitution.ApplyReplacements(v.MountPath, stringReplacements)
	}
	// Apply variable substitution to the sidecar definitions
	sidecars := spec.Sidecars
	for i := range sidecars {
		container.ApplySidecarReplacements(&sidecars[i], stringReplacements, arrayReplacements)
	}
	return spec
}
// ApplyPodTemplateReplacements applies parameter substitution to a PodTemplate.
// It operates on a deep copy and returns it; a nil podTemplate yields nil.
// Only string-valued fields are substituted; array and object replacements do not
// apply to pod template fields.
func ApplyPodTemplateReplacements(podTemplate *podtpl.Template, tr *v1.TaskRun, defaults ...v1.ParamSpec) *podtpl.Template {
	if podTemplate == nil {
		return nil
	}
	result := podTemplate.DeepCopy()
	// Only the string replacements are needed here; the spec argument is not used by
	// getTaskParameters, so nil is passed.
	stringReplacements, _, _ := getTaskParameters(nil, tr, defaults...)
	// Apply substitution to NodeSelector (both keys and values may contain variables).
	if result.NodeSelector != nil {
		newNodeSelector := make(map[string]string)
		for k, v := range result.NodeSelector {
			newKey := substitution.ApplyReplacements(k, stringReplacements)
			newValue := substitution.ApplyReplacements(v, stringReplacements)
			newNodeSelector[newKey] = newValue
		}
		result.NodeSelector = newNodeSelector
	}
	// Apply substitution to Tolerations
	for i := range result.Tolerations {
		result.Tolerations[i].Key = substitution.ApplyReplacements(result.Tolerations[i].Key, stringReplacements)
		result.Tolerations[i].Value = substitution.ApplyReplacements(result.Tolerations[i].Value, stringReplacements)
		result.Tolerations[i].Operator = corev1.TolerationOperator(substitution.ApplyReplacements(string(result.Tolerations[i].Operator), stringReplacements))
		result.Tolerations[i].Effect = corev1.TaintEffect(substitution.ApplyReplacements(string(result.Tolerations[i].Effect), stringReplacements))
	}
	// Apply substitution to Affinity
	if result.Affinity != nil {
		applyAffinityReplacements(result.Affinity, stringReplacements)
	}
	// Apply substitution to SecurityContext labels and annotations
	if result.SecurityContext != nil {
		applySecurityContextReplacements(result.SecurityContext, stringReplacements)
	}
	// Apply substitution to RuntimeClassName
	if result.RuntimeClassName != nil {
		runtimeClassName := substitution.ApplyReplacements(*result.RuntimeClassName, stringReplacements)
		result.RuntimeClassName = &runtimeClassName
	}
	// Apply substitution to SchedulerName
	if result.SchedulerName != "" {
		result.SchedulerName = substitution.ApplyReplacements(result.SchedulerName, stringReplacements)
	}
	// Apply substitution to PriorityClassName
	if result.PriorityClassName != nil {
		priorityClassName := substitution.ApplyReplacements(*result.PriorityClassName, stringReplacements)
		result.PriorityClassName = &priorityClassName
	}
	// Apply substitution to ImagePullSecrets
	for i := range result.ImagePullSecrets {
		result.ImagePullSecrets[i].Name = substitution.ApplyReplacements(result.ImagePullSecrets[i].Name, stringReplacements)
	}
	// Apply substitution to HostAliases
	for i := range result.HostAliases {
		result.HostAliases[i].IP = substitution.ApplyReplacements(result.HostAliases[i].IP, stringReplacements)
		for j := range result.HostAliases[i].Hostnames {
			result.HostAliases[i].Hostnames[j] = substitution.ApplyReplacements(result.HostAliases[i].Hostnames[j], stringReplacements)
		}
	}
	// Apply substitution to TopologySpreadConstraints
	for i := range result.TopologySpreadConstraints {
		result.TopologySpreadConstraints[i].TopologyKey = substitution.ApplyReplacements(result.TopologySpreadConstraints[i].TopologyKey, stringReplacements)
		if result.TopologySpreadConstraints[i].LabelSelector != nil {
			applyLabelSelectorReplacements(result.TopologySpreadConstraints[i].LabelSelector, stringReplacements)
		}
	}
	// Apply substitution to DNSPolicy
	if result.DNSPolicy != nil {
		dnsPolicy := corev1.DNSPolicy(substitution.ApplyReplacements(string(*result.DNSPolicy), stringReplacements))
		result.DNSPolicy = &dnsPolicy
	}
	// Apply substitution to DNSConfig
	if result.DNSConfig != nil {
		applyDNSConfigReplacements(result.DNSConfig, stringReplacements)
	}
	// Apply substitution to Volumes
	for i := range result.Volumes {
		applyVolumeReplacements(&result.Volumes[i], stringReplacements)
	}
	// Apply substitution to Env (names, values, and ValueFrom references)
	for i := range result.Env {
		result.Env[i].Name = substitution.ApplyReplacements(result.Env[i].Name, stringReplacements)
		result.Env[i].Value = substitution.ApplyReplacements(result.Env[i].Value, stringReplacements)
		if result.Env[i].ValueFrom != nil {
			applyEnvVarSourceReplacements(result.Env[i].ValueFrom, stringReplacements)
		}
	}
	return result
}
// applyAffinityReplacements substitutes variables in each of the three affinity
// kinds that are present, in place.
func applyAffinityReplacements(affinity *corev1.Affinity, stringReplacements map[string]string) {
	if na := affinity.NodeAffinity; na != nil {
		applyNodeAffinityReplacements(na, stringReplacements)
	}
	if pa := affinity.PodAffinity; pa != nil {
		applyPodAffinityReplacements(pa, stringReplacements)
	}
	if paa := affinity.PodAntiAffinity; paa != nil {
		applyPodAntiAffinityReplacements(paa, stringReplacements)
	}
}
// applyNodeAffinityReplacements substitutes variables in both the required and the
// preferred node-affinity selector terms, in place.
func applyNodeAffinityReplacements(nodeAffinity *corev1.NodeAffinity, stringReplacements map[string]string) {
	if required := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution; required != nil {
		terms := required.NodeSelectorTerms
		for i := range terms {
			applyNodeSelectorTermReplacements(&terms[i], stringReplacements)
		}
	}
	preferred := nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	for i := range preferred {
		applyNodeSelectorTermReplacements(&preferred[i].Preference, stringReplacements)
	}
}
// applyNodeSelectorTermReplacements substitutes variables in the keys and values of
// a term's match expressions and match fields, in place.
func applyNodeSelectorTermReplacements(term *corev1.NodeSelectorTerm, stringReplacements map[string]string) {
	for i := range term.MatchExpressions {
		expr := &term.MatchExpressions[i]
		expr.Key = substitution.ApplyReplacements(expr.Key, stringReplacements)
		for j, value := range expr.Values {
			expr.Values[j] = substitution.ApplyReplacements(value, stringReplacements)
		}
	}
	for i := range term.MatchFields {
		field := &term.MatchFields[i]
		field.Key = substitution.ApplyReplacements(field.Key, stringReplacements)
		for j, value := range field.Values {
			field.Values[j] = substitution.ApplyReplacements(value, stringReplacements)
		}
	}
}
// applyPodAffinityReplacements substitutes parameters in all required and
// preferred pod-affinity terms, in place.
func applyPodAffinityReplacements(podAffinity *corev1.PodAffinity, stringReplacements map[string]string) {
	required := podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
	for i := range required {
		applyPodAffinityTermReplacements(&required[i], stringReplacements)
	}
	preferred := podAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	for i := range preferred {
		applyPodAffinityTermReplacements(&preferred[i].PodAffinityTerm, stringReplacements)
	}
}
// applyPodAntiAffinityReplacements substitutes parameters in all required and
// preferred pod-anti-affinity terms, in place.
func applyPodAntiAffinityReplacements(podAntiAffinity *corev1.PodAntiAffinity, stringReplacements map[string]string) {
	required := podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
	for i := range required {
		applyPodAffinityTermReplacements(&required[i], stringReplacements)
	}
	preferred := podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	for i := range preferred {
		applyPodAffinityTermReplacements(&preferred[i].PodAffinityTerm, stringReplacements)
	}
}
// applyPodAffinityTermReplacements substitutes parameters in a pod affinity
// term: its label selector, topology key, namespace selector and namespaces.
func applyPodAffinityTermReplacements(term *corev1.PodAffinityTerm, stringReplacements map[string]string) {
	if sel := term.LabelSelector; sel != nil {
		applyLabelSelectorReplacements(sel, stringReplacements)
	}
	term.TopologyKey = substitution.ApplyReplacements(term.TopologyKey, stringReplacements)
	if nsSel := term.NamespaceSelector; nsSel != nil {
		applyLabelSelectorReplacements(nsSel, stringReplacements)
	}
	for i, ns := range term.Namespaces {
		term.Namespaces[i] = substitution.ApplyReplacements(ns, stringReplacements)
	}
}
// applyLabelSelectorReplacements substitutes parameters in a label selector.
// MatchLabels keys may themselves contain variables, so a fresh map is built
// and swapped in; MatchExpressions are rewritten in place.
func applyLabelSelectorReplacements(selector *metav1.LabelSelector, stringReplacements map[string]string) {
	if selector.MatchLabels != nil {
		replaced := make(map[string]string, len(selector.MatchLabels))
		for k, v := range selector.MatchLabels {
			replaced[substitution.ApplyReplacements(k, stringReplacements)] = substitution.ApplyReplacements(v, stringReplacements)
		}
		selector.MatchLabels = replaced
	}
	for i := range selector.MatchExpressions {
		expr := &selector.MatchExpressions[i]
		expr.Key = substitution.ApplyReplacements(expr.Key, stringReplacements)
		for j, v := range expr.Values {
			expr.Values[j] = substitution.ApplyReplacements(v, stringReplacements)
		}
	}
}
// applySecurityContextReplacements applies parameter substitution, in place,
// to the string-valued fields of a pod-level SecurityContext: SELinuxOptions,
// WindowsOptions, SupplementalGroupsPolicy, Sysctls, FSGroupChangePolicy,
// SeccompProfile, AppArmorProfile and SELinuxChangePolicy. Typed string
// fields (policies, profile types) are round-tripped through string for the
// substitution and cast back. Fields not listed here are left unchanged.
func applySecurityContextReplacements(securityContext *corev1.PodSecurityContext, stringReplacements map[string]string) {
	// Apply substitution to SELinuxOptions
	if securityContext.SELinuxOptions != nil {
		securityContext.SELinuxOptions.User = substitution.ApplyReplacements(securityContext.SELinuxOptions.User, stringReplacements)
		securityContext.SELinuxOptions.Role = substitution.ApplyReplacements(securityContext.SELinuxOptions.Role, stringReplacements)
		securityContext.SELinuxOptions.Type = substitution.ApplyReplacements(securityContext.SELinuxOptions.Type, stringReplacements)
		securityContext.SELinuxOptions.Level = substitution.ApplyReplacements(securityContext.SELinuxOptions.Level, stringReplacements)
	}
	// Apply substitution to WindowsOptions
	if securityContext.WindowsOptions != nil {
		// Pointer fields: substitute into a local copy and re-point, so a nil
		// field stays nil and a non-nil field gets a fresh backing string.
		if securityContext.WindowsOptions.GMSACredentialSpecName != nil {
			gmsaCredentialSpecName := substitution.ApplyReplacements(*securityContext.WindowsOptions.GMSACredentialSpecName, stringReplacements)
			securityContext.WindowsOptions.GMSACredentialSpecName = &gmsaCredentialSpecName
		}
		if securityContext.WindowsOptions.GMSACredentialSpec != nil {
			gmsaCredentialSpec := substitution.ApplyReplacements(*securityContext.WindowsOptions.GMSACredentialSpec, stringReplacements)
			securityContext.WindowsOptions.GMSACredentialSpec = &gmsaCredentialSpec
		}
		if securityContext.WindowsOptions.RunAsUserName != nil {
			runAsUserName := substitution.ApplyReplacements(*securityContext.WindowsOptions.RunAsUserName, stringReplacements)
			securityContext.WindowsOptions.RunAsUserName = &runAsUserName
		}
	}
	// Apply substitution to SupplementalGroupsPolicy
	if securityContext.SupplementalGroupsPolicy != nil {
		supplementalGroupsPolicy := corev1.SupplementalGroupsPolicy(substitution.ApplyReplacements(string(*securityContext.SupplementalGroupsPolicy), stringReplacements))
		securityContext.SupplementalGroupsPolicy = &supplementalGroupsPolicy
	}
	// Apply substitution to Sysctls
	for i := range securityContext.Sysctls {
		securityContext.Sysctls[i].Name = substitution.ApplyReplacements(securityContext.Sysctls[i].Name, stringReplacements)
		securityContext.Sysctls[i].Value = substitution.ApplyReplacements(securityContext.Sysctls[i].Value, stringReplacements)
	}
	// Apply substitution to FSGroupChangePolicy
	if securityContext.FSGroupChangePolicy != nil {
		fsGroupChangePolicy := corev1.PodFSGroupChangePolicy(substitution.ApplyReplacements(string(*securityContext.FSGroupChangePolicy), stringReplacements))
		securityContext.FSGroupChangePolicy = &fsGroupChangePolicy
	}
	// Apply substitution to SeccompProfile
	if securityContext.SeccompProfile != nil {
		securityContext.SeccompProfile.Type = corev1.SeccompProfileType(substitution.ApplyReplacements(string(securityContext.SeccompProfile.Type), stringReplacements))
		if securityContext.SeccompProfile.LocalhostProfile != nil {
			localhostProfile := substitution.ApplyReplacements(*securityContext.SeccompProfile.LocalhostProfile, stringReplacements)
			securityContext.SeccompProfile.LocalhostProfile = &localhostProfile
		}
	}
	// Apply substitution to AppArmorProfile
	if securityContext.AppArmorProfile != nil {
		securityContext.AppArmorProfile.Type = corev1.AppArmorProfileType(substitution.ApplyReplacements(string(securityContext.AppArmorProfile.Type), stringReplacements))
		if securityContext.AppArmorProfile.LocalhostProfile != nil {
			localhostProfile := substitution.ApplyReplacements(*securityContext.AppArmorProfile.LocalhostProfile, stringReplacements)
			securityContext.AppArmorProfile.LocalhostProfile = &localhostProfile
		}
	}
	// Apply substitution to SELinuxChangePolicy
	if securityContext.SELinuxChangePolicy != nil {
		seLinuxChangePolicy := corev1.PodSELinuxChangePolicy(substitution.ApplyReplacements(string(*securityContext.SELinuxChangePolicy), stringReplacements))
		securityContext.SELinuxChangePolicy = &seLinuxChangePolicy
	}
}
// applyDNSConfigReplacements substitutes parameters in a pod DNS config:
// nameservers, search domains, and resolver option names/values, in place.
func applyDNSConfigReplacements(dnsConfig *corev1.PodDNSConfig, stringReplacements map[string]string) {
	for i, ns := range dnsConfig.Nameservers {
		dnsConfig.Nameservers[i] = substitution.ApplyReplacements(ns, stringReplacements)
	}
	for i, search := range dnsConfig.Searches {
		dnsConfig.Searches[i] = substitution.ApplyReplacements(search, stringReplacements)
	}
	for i := range dnsConfig.Options {
		opt := &dnsConfig.Options[i]
		opt.Name = substitution.ApplyReplacements(opt.Name, stringReplacements)
		if opt.Value != nil {
			replaced := substitution.ApplyReplacements(*opt.Value, stringReplacements)
			opt.Value = &replaced
		}
	}
}
// applyVolumeReplacements substitutes parameters in a volume definition:
// its name plus the referenced names/keys/paths of ConfigMap, Secret,
// PersistentVolumeClaim, Projected and CSI volume sources, in place.
func applyVolumeReplacements(volume *corev1.Volume, stringReplacements map[string]string) {
	volume.Name = substitution.ApplyReplacements(volume.Name, stringReplacements)
	if cm := volume.ConfigMap; cm != nil {
		cm.Name = substitution.ApplyReplacements(cm.Name, stringReplacements)
		for i := range cm.Items {
			cm.Items[i].Key = substitution.ApplyReplacements(cm.Items[i].Key, stringReplacements)
			cm.Items[i].Path = substitution.ApplyReplacements(cm.Items[i].Path, stringReplacements)
		}
	}
	if sec := volume.Secret; sec != nil {
		sec.SecretName = substitution.ApplyReplacements(sec.SecretName, stringReplacements)
		for i := range sec.Items {
			sec.Items[i].Key = substitution.ApplyReplacements(sec.Items[i].Key, stringReplacements)
			sec.Items[i].Path = substitution.ApplyReplacements(sec.Items[i].Path, stringReplacements)
		}
	}
	if pvc := volume.PersistentVolumeClaim; pvc != nil {
		pvc.ClaimName = substitution.ApplyReplacements(pvc.ClaimName, stringReplacements)
	}
	if volume.Projected != nil {
		for i := range volume.Projected.Sources {
			// Source fields are pointers, so updating through the indexed
			// element mutates the underlying projection.
			src := &volume.Projected.Sources[i]
			if src.ConfigMap != nil {
				src.ConfigMap.Name = substitution.ApplyReplacements(src.ConfigMap.Name, stringReplacements)
			}
			if src.Secret != nil {
				src.Secret.Name = substitution.ApplyReplacements(src.Secret.Name, stringReplacements)
			}
			if src.ServiceAccountToken != nil {
				src.ServiceAccountToken.Audience = substitution.ApplyReplacements(src.ServiceAccountToken.Audience, stringReplacements)
			}
		}
	}
	if csi := volume.CSI; csi != nil {
		if csi.NodePublishSecretRef != nil {
			csi.NodePublishSecretRef.Name = substitution.ApplyReplacements(csi.NodePublishSecretRef.Name, stringReplacements)
		}
		for key, value := range csi.VolumeAttributes {
			csi.VolumeAttributes[key] = substitution.ApplyReplacements(value, stringReplacements)
		}
	}
}
// applyEnvVarSourceReplacements substitutes parameters in an env var source:
// ConfigMap/Secret key references, field references and resource field
// references, in place.
func applyEnvVarSourceReplacements(valueFrom *corev1.EnvVarSource, stringReplacements map[string]string) {
	if ref := valueFrom.ConfigMapKeyRef; ref != nil {
		ref.Name = substitution.ApplyReplacements(ref.Name, stringReplacements)
		ref.Key = substitution.ApplyReplacements(ref.Key, stringReplacements)
	}
	if ref := valueFrom.SecretKeyRef; ref != nil {
		ref.Name = substitution.ApplyReplacements(ref.Name, stringReplacements)
		ref.Key = substitution.ApplyReplacements(ref.Key, stringReplacements)
	}
	if ref := valueFrom.FieldRef; ref != nil {
		ref.FieldPath = substitution.ApplyReplacements(ref.FieldPath, stringReplacements)
	}
	if ref := valueFrom.ResourceFieldRef; ref != nil {
		ref.Resource = substitution.ApplyReplacements(ref.Resource, stringReplacements)
		ref.ContainerName = substitution.ApplyReplacements(ref.ContainerName, stringReplacements)
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"context"
"errors"
"fmt"
"strings"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
resolutionV1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
"github.com/tektoncd/pipeline/pkg/remote"
"github.com/tektoncd/pipeline/pkg/remoteresolution/remote/resolution"
remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/substitution"
"github.com/tektoncd/pipeline/pkg/trustedresources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/kmeta"
)
// GetTaskKind returns the referenced Task kind (Task, ...) if the TaskRun is using TaskRef,
// falling back to the namespaced Task kind otherwise.
func GetTaskKind(taskrun *v1.TaskRun) v1.TaskKind {
	if ref := taskrun.Spec.TaskRef; ref != nil && ref.Kind != "" {
		return ref.Kind
	}
	return v1.NamespacedTaskKind
}
// GetTaskFuncFromTaskRun is a factory function that will use the given TaskRef as context to return a valid GetTask function.
// It also requires a kubeclient, tektonclient, namespace, and service account in case it needs to find that task in
// cluster or authorize against an external repository. It will figure out whether it needs to look in the cluster or in
// a remote image to fetch the reference. It will also return the "kind" of the task being referenced.
// OCI bundle and remote resolution tasks will be verified by trusted resources if the feature is enabled
func GetTaskFuncFromTaskRun(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, requester remoteresource.Requester, taskrun *v1.TaskRun, verificationPolicies []*v1alpha1.VerificationPolicy) GetTask {
	// A TaskSpec already recorded in the status is the source of truth; never
	// fetch again. The same applies to the RefSource in Status.Provenance.
	if taskrun.Status.TaskSpec == nil {
		return GetTaskFunc(ctx, k8s, tekton, requester, taskrun, taskrun.Spec.TaskRef, taskrun.Name, taskrun.Namespace, taskrun.Spec.ServiceAccountName, verificationPolicies)
	}
	return func(_ context.Context, name string) (*v1.Task, *v1.RefSource, *trustedresources.VerificationResult, error) {
		var refSource *v1.RefSource
		if prov := taskrun.Status.Provenance; prov != nil {
			refSource = prov.RefSource
		}
		task := &v1.Task{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: taskrun.Namespace,
			},
			Spec: *taskrun.Status.TaskSpec,
		}
		return task, refSource, nil, nil
	}
}
// GetTaskFunc is a factory function that will use the given TaskRef as context to return a valid GetTask function.
// It also requires a kubeclient, tektonclient, namespace, and service account in case it needs to find that task in
// cluster or authorize against an external repository. It will figure out whether it needs to look in the cluster or in
// a remote image to fetch the reference. It will also return the "kind" of the task being referenced.
// OCI bundle and remote resolution tasks will be verified by trusted resources if the feature is enabled
//
// NOTE(review): saName is not used anywhere in this function body — presumably
// kept for signature compatibility with callers; confirm before removing.
func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, requester remoteresource.Requester,
	owner kmeta.OwnerRefable, tr *v1.TaskRef, trName string, namespace, saName string, verificationPolicies []*v1alpha1.VerificationPolicy,
) GetTask {
	// Default to the namespaced Task kind unless the ref specifies otherwise.
	kind := v1.NamespacedTaskKind
	if tr != nil && tr.Kind != "" {
		kind = tr.Kind
	}
	switch {
	case tr != nil && tr.Resolver != "" && requester != nil:
		// Return an inline function that implements GetTask by calling Resolver.Get with the specified task type and
		// casting it to a TaskObject.
		return func(ctx context.Context, name string) (*v1.Task, *v1.RefSource, *trustedresources.VerificationResult, error) {
			var replacedParams v1.Params
			var url string
			// When the owner is a TaskRun, apply its param/context variable
			// replacements to the resolver params before issuing the request.
			if ownerAsTR, ok := owner.(*v1.TaskRun); ok {
				stringReplacements, arrayReplacements, _ := replacementsFromParams(ownerAsTR.Spec.Params)
				for k, v := range getContextReplacements("", ownerAsTR) {
					stringReplacements[k] = v
				}
				for _, p := range tr.Params {
					p.Value.ApplyReplacements(stringReplacements, arrayReplacements, nil)
					replacedParams = append(replacedParams, p)
				}
				if err := v1.RefNameLikeUrl(tr.Name); err == nil {
					// The name is url-like so its not a local reference.
					// NOTE(review): this mutates the shared TaskRef in place,
					// so the substituted URL persists on tr after this call.
					tr.Name = substitution.ApplyReplacements(tr.Name, stringReplacements)
					url = tr.Name
				}
			} else {
				// Non-TaskRun owners get the resolver params verbatim.
				replacedParams = append(replacedParams, tr.Params...)
			}
			resolverPayload := remoteresource.ResolverPayload{
				Name:      trName,
				Namespace: namespace,
				ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
					Params: replacedParams,
					URL:    url,
				},
			}
			resolver := resolution.NewResolver(requester, owner, string(tr.Resolver), resolverPayload)
			return resolveTask(ctx, resolver, name, namespace, kind, k8s, tekton, verificationPolicies)
		}
	default:
		// Even if there is no task ref, we should try to return a local resolver.
		local := &LocalTaskRefResolver{
			Namespace:    namespace,
			Kind:         kind,
			Tektonclient: tekton,
		}
		return local.GetTask
	}
}
// GetStepActionFunc is a factory function that will use the given Ref as context to return a valid GetStepAction function.
// It also requires a kubeclient, tektonclient, requester in case it needs to find that task in
// cluster or authorize against an external repository. It will figure out whether it needs to look in the cluster or in
// a remote location to fetch the reference.
func GetStepActionFunc(tekton clientset.Interface, k8s kubernetes.Interface, requester remoteresource.Requester, tr *v1.TaskRun, taskSpec v1.TaskSpec, step *v1.Step) GetStepAction {
	trName := tr.Name
	namespace := tr.Namespace
	// Remote resolution path: only when the step has a resolver-backed Ref
	// and a requester is available.
	if step.Ref != nil && step.Ref.Resolver != "" && requester != nil {
		// Return an inline function that implements GetStepAction by calling Resolver.Get with the specified StepAction type and
		// casting it to a StepAction.
		return func(ctx context.Context, name string) (*v1beta1.StepAction, *v1.RefSource, error) {
			// Perform params replacements for StepAction resolver params.
			// NOTE(review): this mutates step.Ref.Params in place at call time.
			ApplyParameterSubstitutionInResolverParams(tr, taskSpec, step)
			resolverPayload := remoteresource.ResolverPayload{
				Name:      trName,
				Namespace: namespace,
				ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
					Params: step.Ref.Params,
					URL:    step.Ref.Name,
				},
			}
			resolver := resolution.NewResolver(requester, tr, string(step.Ref.Resolver), resolverPayload)
			return resolveStepAction(ctx, resolver, name, namespace, k8s, tekton)
		}
	}
	// Otherwise resolve the StepAction from the local cluster.
	local := &LocalStepActionRefResolver{
		Namespace:    namespace,
		Tektonclient: tekton,
	}
	return local.GetStepAction
}
// ApplyParameterSubstitutionInResolverParams applies parameter substitutions in resolver params for Step Ref.
// Replacements from the TaskSpec's parameter defaults are collected first, then
// extended with the TaskRun's params so run-time values take precedence.
func ApplyParameterSubstitutionInResolverParams(tr *v1.TaskRun, taskSpec v1.TaskSpec, step *v1.Step) {
	stringReplacements := map[string]string{}
	arrayReplacements := map[string][]string{}
	objectReplacements := map[string]map[string]string{}
	defaultSR, defaultAR, defaultOR := replacementsFromDefaultParams(taskSpec.Params)
	stringReplacements, arrayReplacements, objectReplacements = extendReplacements(stringReplacements, arrayReplacements, objectReplacements, defaultSR, defaultAR, defaultOR)
	paramSR, paramAR, paramOR := replacementsFromParams(tr.Spec.Params)
	stringReplacements, arrayReplacements, objectReplacements = extendReplacements(stringReplacements, arrayReplacements, objectReplacements, paramSR, paramAR, paramOR)
	step.Ref.Params = step.Ref.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
}
// extendReplacements merges the *ToAdd replacement maps into the corresponding
// accumulator maps and returns the (mutated) accumulators. Later additions
// overwrite existing string/array keys.
func extendReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string, stringReplacementsToAdd map[string]string, arrayReplacementsToAdd map[string][]string, objectReplacementsToAdd map[string]map[string]string) (map[string]string, map[string][]string, map[string]map[string]string) {
	for key, val := range stringReplacementsToAdd {
		stringReplacements[key] = val
	}
	for key, vals := range arrayReplacementsToAdd {
		arrayReplacements[key] = vals
	}
	return stringReplacements, arrayReplacements, extendObjectReplacements(objectReplacements, objectReplacementsToAdd)
}
// extendObjectReplacements merges objectReplacementsToAdd into
// objectReplacements key-by-key and returns the mutated map. A nil
// objectReplacements is returned unchanged (additions are dropped, matching
// the original behavior).
//
// Fix: the previous implementation assigned the inner map from
// objectReplacementsToAdd directly (objectReplacements[k] = v) when the key
// was missing, aliasing the caller's input map — any later mutation of the
// result would leak into objectReplacementsToAdd. Entries are now copied
// into a fresh map instead.
func extendObjectReplacements(objectReplacements map[string]map[string]string, objectReplacementsToAdd map[string]map[string]string) map[string]map[string]string {
	if objectReplacements == nil {
		return nil
	}
	for k, v := range objectReplacementsToAdd {
		if objectReplacements[k] == nil {
			objectReplacements[k] = make(map[string]string, len(v))
		}
		for key, val := range v {
			objectReplacements[k][key] = val
		}
	}
	return objectReplacements
}
// resolveTask accepts an impl of remote.Resolver and attempts to fetch a task
// with the given name, verifying it via trusted resources when that feature
// is enabled. It returns an error if the remote fetch fails or the returned
// data is not a valid task; the VerificationResult carries the trusted
// resources outcome.
func resolveTask(ctx context.Context, resolver remote.Resolver, name, namespace string, kind v1.TaskKind, k8s kubernetes.Interface, tekton clientset.Interface, verificationPolicies []*v1alpha1.VerificationPolicy) (*v1.Task, *v1.RefSource, *trustedresources.VerificationResult, error) {
	// Because the resolver will only return references with the same kind,
	// asking for the lower-cased singular kind ensures we don't accidentally
	// get a same-named object of a different kind.
	singularKind := strings.TrimSuffix(strings.ToLower(string(kind)), "s")
	obj, refSource, err := resolver.Get(ctx, singularKind, name)
	if err != nil {
		return nil, nil, nil, err
	}
	task, vr, err := readRuntimeObjectAsTask(ctx, namespace, obj, k8s, tekton, refSource, verificationPolicies)
	if err != nil {
		return nil, nil, nil, err
	}
	return task, refSource, vr, nil
}
// resolveStepAction fetches a StepAction named name via the given resolver and
// normalizes it to the v1beta1 API version. A resolved v1beta1 StepAction is
// dry-run validated and returned as-is; a v1alpha1 StepAction is defaulted,
// dry-run validated, then converted to v1beta1. Any other resolved type (or a
// dry-run result of an unexpected type) yields the "resource is not a
// StepAction" error.
func resolveStepAction(ctx context.Context, resolver remote.Resolver, name, namespace string, k8s kubernetes.Interface, tekton clientset.Interface) (*v1beta1.StepAction, *v1.RefSource, error) {
	obj, refSource, err := resolver.Get(ctx, "StepAction", name)
	if err != nil {
		return nil, nil, err
	}
	switch obj := obj.(type) {
	case *v1beta1.StepAction:
		// Cleanup object from things we don't care about
		// FIXME: extract this in a function
		obj.ObjectMeta.OwnerReferences = nil
		// Dry-run create so validating admission webhooks run without
		// persisting the StepAction on the cluster.
		o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
		if err != nil {
			return nil, nil, err
		}
		if mutatedStepAction, ok := o.(*v1beta1.StepAction); ok {
			// Keep the original metadata; the dry run may have rewritten it.
			mutatedStepAction.ObjectMeta = obj.ObjectMeta
			return mutatedStepAction, refSource, nil
		}
	case *v1alpha1.StepAction:
		obj.SetDefaults(ctx)
		// Cleanup object from things we don't care about
		// FIXME: extract this in a function
		obj.ObjectMeta.OwnerReferences = nil
		o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
		if err != nil {
			return nil, nil, err
		}
		if mutatedStepAction, ok := o.(*v1alpha1.StepAction); ok {
			mutatedStepAction.ObjectMeta = obj.ObjectMeta
			// Upgrade the v1alpha1 object to v1beta1 before returning.
			v1BetaStepAction := v1beta1.StepAction{
				TypeMeta: metav1.TypeMeta{
					Kind:       "StepAction",
					APIVersion: "tekton.dev/v1beta1",
				},
			}
			err := mutatedStepAction.ConvertTo(ctx, &v1BetaStepAction)
			if err != nil {
				return nil, nil, err
			}
			return &v1BetaStepAction, refSource, nil
		}
	}
	return nil, nil, errors.New("resource is not a StepAction")
}
// readRuntimeObjectAsTask tries to convert a generic runtime.Object
// into a *v1.Task type so that its meta and spec fields
// can be read. v1beta1 object will be converted to v1 and returned.
// An error is returned if the given object is not a Task
// or if there is an error validating or upgrading an older TaskObject into
// its v1beta1 equivalent.
// A VerificationResult is returned if trusted resources is enabled, VerificationResult contains the result type and err.
// v1beta1 task will be verified by trusted resources if the feature is enabled
// TODO(#5541): convert v1beta1 obj to v1 once we use v1 as the stored version
func readRuntimeObjectAsTask(ctx context.Context, namespace string, obj runtime.Object, k8s kubernetes.Interface, tekton clientset.Interface, refSource *v1.RefSource, verificationPolicies []*v1alpha1.VerificationPolicy) (*v1.Task, *trustedresources.VerificationResult, error) {
	switch obj := obj.(type) {
	case *v1beta1.Task:
		obj.SetDefaults(ctx)
		// Cleanup object from things we don't care about
		// FIXME: extract this in a function
		obj.ObjectMeta.OwnerReferences = nil
		// Verify the Task once we fetch from the remote resolution, mutating, validation and conversion of the task should happen after the verification, since signatures are based on the remote task contents
		vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
		// Issue a dry-run request to create the remote Task, so that it can undergo validation from validating admission webhooks
		// without actually creating the Task on the cluster.
		o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
		if err != nil {
			return nil, nil, err
		}
		if mutatedTask, ok := o.(*v1beta1.Task); ok {
			t := &v1.Task{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Task",
					APIVersion: "tekton.dev/v1",
				},
			}
			mutatedTask.ObjectMeta = obj.ObjectMeta
			// Fix: the error previously said "into Pipeline" for a Task
			// conversion and discarded the underlying error; report the
			// correct type and wrap the cause.
			if err := mutatedTask.ConvertTo(ctx, t); err != nil {
				return nil, nil, fmt.Errorf("failed to convert obj %s into Task: %w", mutatedTask.GetObjectKind().GroupVersionKind().String(), err)
			}
			return t, &vr, nil
		}
	case *v1.Task:
		// This SetDefaults is currently not necessary, but for consistency, it is recommended to add it.
		// Avoid forgetting to add it in the future when there is a v2 version, causing similar problems.
		obj.SetDefaults(ctx)
		// Cleanup object from things we don't care about
		// FIXME: extract this in a function
		obj.ObjectMeta.OwnerReferences = nil
		vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
		// Issue a dry-run request to create the remote Task, so that it can undergo validation from validating admission webhooks
		// without actually creating the Task on the cluster
		o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
		if err != nil {
			return nil, nil, err
		}
		if mutatedTask, ok := o.(*v1.Task); ok {
			mutatedTask.ObjectMeta = obj.ObjectMeta
			return mutatedTask, &vr, nil
		}
	}
	return nil, nil, errors.New("resource is not a task")
}
// LocalTaskRefResolver uses the current cluster to resolve a task reference.
type LocalTaskRefResolver struct {
	// Namespace is the namespace the Task is looked up in; it must be
	// non-empty for resolution to succeed.
	Namespace string
	// Kind is the kind of task being referenced (e.g. namespaced Task).
	Kind v1.TaskKind
	// Tektonclient is the versioned Tekton client used for the lookup.
	Tektonclient clientset.Interface
}
// GetTask will resolve a Task from the local cluster using a versioned Tekton client. It will
// return an error if it can't find an appropriate Task for any reason.
// TODO(#6666): support local task verification
func (l *LocalTaskRefResolver) GetTask(ctx context.Context, name string) (*v1.Task, *v1.RefSource, *trustedresources.VerificationResult, error) {
	// Local resolution is namespace-scoped; fail fast without one.
	if l.Namespace == "" {
		return nil, nil, nil, fmt.Errorf("must specify namespace to resolve reference to task %s", name)
	}
	t, err := l.Tektonclient.TektonV1().Tasks(l.Namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, nil, err
	}
	// Local tasks carry no remote RefSource and are not verified (see TODO).
	return t, nil, nil, nil
}
// LocalStepActionRefResolver uses the current cluster to resolve a StepAction reference.
type LocalStepActionRefResolver struct {
	// Namespace is the namespace the StepAction is looked up in; it must be
	// non-empty for resolution to succeed.
	Namespace string
	// Tektonclient is the versioned Tekton client used for the lookup.
	Tektonclient clientset.Interface
}
// GetStepAction will resolve a StepAction from the local cluster using a versioned Tekton client.
// It will return an error if it can't find an appropriate StepAction for any reason.
func (l *LocalStepActionRefResolver) GetStepAction(ctx context.Context, name string) (*v1beta1.StepAction, *v1.RefSource, error) {
	// Local resolution is namespace-scoped; fail fast without one.
	if l.Namespace == "" {
		return nil, nil, fmt.Errorf("must specify namespace to resolve reference to step action %s", name)
	}
	sa, err := l.Tektonclient.TektonV1beta1().StepActions(l.Namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, err
	}
	// Local step actions carry no remote RefSource.
	return sa, nil, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"context"
"errors"
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
resolutionutil "github.com/tektoncd/pipeline/pkg/internal/resolution"
"github.com/tektoncd/pipeline/pkg/pod"
remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/trustedresources"
"golang.org/x/sync/errgroup"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// ResolvedTask contains the data that is needed to execute
// the TaskRun.
type ResolvedTask struct {
	// TaskName is the name of the resolved Task.
	TaskName string
	// Kind is the kind of task that was resolved (e.g. namespaced Task).
	Kind v1.TaskKind
	// TaskSpec is the resolved Task's spec.
	TaskSpec *v1.TaskSpec
	// VerificationResult is the result from trusted resources if the feature is enabled.
	VerificationResult *trustedresources.VerificationResult
}
// GetStepAction is a function used to retrieve StepActions given a name.
// It returns the StepAction and the RefSource it was resolved from.
type GetStepAction func(context.Context, string) (*v1beta1.StepAction, *v1.RefSource, error)

// GetTask is a function used to retrieve Tasks given a name.
// VerificationResult is the result from trusted resources if the feature is enabled.
type GetTask func(context.Context, string) (*v1.Task, *v1.RefSource, *trustedresources.VerificationResult, error)

// GetTaskRun is a function used to retrieve TaskRuns given a name.
type GetTaskRun func(string) (*v1.TaskRun, error)
// GetTaskData will retrieve the Task metadata and Spec associated with the
// provided TaskRun. This can come from a reference Task or from the TaskRun's
// metadata and embedded TaskSpec.
func GetTaskData(ctx context.Context, taskRun *v1.TaskRun, getTask GetTask) (*resolutionutil.ResolvedObjectMeta, *v1.TaskSpec, error) {
	taskMeta := metav1.ObjectMeta{}
	taskSpec := v1.TaskSpec{}
	var refSource *v1.RefSource
	var verificationResult *trustedresources.VerificationResult
	switch {
	// Case 1: named TaskRef — fetch the referenced Task by name.
	case taskRun.Spec.TaskRef != nil && taskRun.Spec.TaskRef.Name != "":
		// Get related task for taskrun
		t, source, vr, err := getTask(ctx, taskRun.Spec.TaskRef.Name)
		if err != nil {
			return nil, nil, fmt.Errorf("error when listing tasks for taskRun %s: %w", taskRun.Name, err)
		}
		taskMeta = t.ObjectMeta
		taskSpec = t.Spec
		refSource = source
		verificationResult = vr
	// Case 2: embedded TaskSpec — use it directly with the TaskRun's metadata.
	case taskRun.Spec.TaskSpec != nil:
		taskMeta = taskRun.ObjectMeta
		taskSpec = *taskRun.Spec.TaskSpec
		// TODO: if we want to set RefSource for embedded taskspec, set it here.
		// https://github.com/tektoncd/pipeline/issues/5522
	// Case 3: remote resolution — the TaskRef has a resolver but no name.
	// NOTE(review): the TaskRun's own name is passed here (not a ref name),
	// presumably to key the resolution request — confirm against GetTaskFunc.
	case taskRun.Spec.TaskRef != nil && taskRun.Spec.TaskRef.Resolver != "":
		task, source, vr, err := getTask(ctx, taskRun.Name)
		switch {
		case err != nil:
			return nil, nil, err
		case task == nil:
			return nil, nil, errors.New("resolution of remote resource completed successfully but no task was returned")
		default:
			taskMeta = task.ObjectMeta
			taskSpec = task.Spec
		}
		refSource = source
		verificationResult = vr
	default:
		return nil, nil, fmt.Errorf("taskRun %s not providing TaskRef or TaskSpec", taskRun.Name)
	}
	taskSpec.SetDefaults(ctx)
	return &resolutionutil.ResolvedObjectMeta{
		ObjectMeta:         &taskMeta,
		RefSource:          refSource,
		VerificationResult: verificationResult,
	}, &taskSpec, nil
}
// stepRefResolution holds the outcome of resolving a step referencing a StepAction.
type stepRefResolution struct {
	// resolvedStep is the step with the StepAction's fields merged in.
	resolvedStep *v1.Step
	// source records where the StepAction was resolved from.
	source *v1.RefSource
}
// hasStepRefs reports whether any step in the TaskSpec references a
// StepAction, as a fast pre-check before doing any resolution work.
func hasStepRefs(taskSpec *v1.TaskSpec) bool {
	for i := range taskSpec.Steps {
		if taskSpec.Steps[i].Ref != nil {
			return true
		}
	}
	return false
}
// resolveStepRef resolves a step referencing a StepAction by fetching the remote StepAction, merging it with the Step's specification, and returning the resolved step.
// The input step is deep-copied, so the caller's step is never mutated.
func resolveStepRef(ctx context.Context, taskSpec v1.TaskSpec, taskRun *v1.TaskRun, tekton clientset.Interface, k8s kubernetes.Interface, requester remoteresource.Requester, step *v1.Step) (*v1.Step, *v1.RefSource, error) {
	resolvedStep := step.DeepCopy()
	getStepAction := GetStepActionFunc(tekton, k8s, requester, taskRun, taskSpec, resolvedStep)
	stepAction, source, err := getStepAction(ctx, resolvedStep.Ref.Name)
	if err != nil {
		return nil, nil, err
	}
	stepActionSpec := stepAction.StepActionSpec()
	stepActionSpec.SetDefaults(ctx)
	// Turn the StepAction spec into step form so its fields can be merged.
	stepFromStepAction := stepActionSpec.ToStep()
	// Reject params on the step that the StepAction does not declare.
	if err := validateStepHasStepActionParameters(resolvedStep.Params, stepActionSpec.Params); err != nil {
		return nil, nil, err
	}
	// Substitute the step's params (and defaults) into the StepAction fields.
	stepFromStepAction, err = applyStepActionParameters(stepFromStepAction, &taskSpec, taskRun, resolvedStep.Params, stepActionSpec.Params)
	if err != nil {
		return nil, nil, err
	}
	// Merge fields from the resolved StepAction into the step.
	// Image, SecurityContext and WorkingDir are taken unconditionally; the
	// other fields only overwrite the step's own values when the StepAction
	// actually sets them.
	resolvedStep.Image = stepFromStepAction.Image
	resolvedStep.SecurityContext = stepFromStepAction.SecurityContext
	if len(stepFromStepAction.Command) > 0 {
		resolvedStep.Command = stepFromStepAction.Command
	}
	if len(stepFromStepAction.Args) > 0 {
		resolvedStep.Args = stepFromStepAction.Args
	}
	if stepFromStepAction.Script != "" {
		resolvedStep.Script = stepFromStepAction.Script
	}
	resolvedStep.WorkingDir = stepFromStepAction.WorkingDir
	if stepFromStepAction.Env != nil {
		resolvedStep.Env = stepFromStepAction.Env
	}
	if len(stepFromStepAction.VolumeMounts) > 0 {
		resolvedStep.VolumeMounts = stepFromStepAction.VolumeMounts
	}
	if len(stepFromStepAction.Results) > 0 {
		resolvedStep.Results = stepFromStepAction.Results
	}
	// Finalize by clearing Ref and Params, as they have been resolved
	resolvedStep.Ref = nil
	resolvedStep.Params = nil
	return resolvedStep, source, nil
}
// updateTaskRunProvenance records the source provenance for a single step in
// the TaskRun's status. When a StepState with the given name already exists
// (looked up via stepStatusIndex) its Provenance is updated in place;
// otherwise a new StepState carrying the provenance is appended.
func updateTaskRunProvenance(taskRun *v1.TaskRun, stepName string, stepIndex int, source *v1.RefSource, stepStatusIndex map[string]int) {
	if idx, ok := stepStatusIndex[stepName]; ok {
		// Existing StepState: ensure Provenance is non-nil, then set the source.
		state := &taskRun.Status.Steps[idx]
		if state.Provenance == nil {
			state.Provenance = &v1.Provenance{}
		}
		state.Provenance.RefSource = source
		return
	}
	// No StepState yet for this step: append a fresh one.
	taskRun.Status.Steps = append(taskRun.Status.Steps, v1.StepState{
		Name:       pod.TrimStepPrefix(pod.StepName(stepName, stepIndex)),
		Provenance: &v1.Provenance{RefSource: source},
	})
}
// GetStepActionsData extracts the StepActions and merges them with the inlined Step specification.
//
// It returns one fully-resolved Step per entry in taskSpec.Steps, in the
// original order, and records per-step source provenance on the TaskRun's
// status as a side effect. Steps without a Ref pass through unchanged.
func GetStepActionsData(ctx context.Context, taskSpec v1.TaskSpec, taskRun *v1.TaskRun, tekton clientset.Interface, k8s kubernetes.Interface, requester remoteresource.Requester) ([]v1.Step, error) {
	steps := make([]v1.Step, len(taskSpec.Steps))
	// Init step states and known step states indexes lookup map
	if taskRun.Status.Steps == nil {
		taskRun.Status.Steps = []v1.StepState{}
	}
	stepStatusIndex := make(map[string]int, len(taskRun.Status.Steps))
	for i, stepState := range taskRun.Status.Steps {
		stepStatusIndex[stepState.Name] = i
	}
	// If there are no step-ref to resolve, return immediately with nil provenance
	if !hasStepRefs(&taskSpec) {
		for i, step := range taskSpec.Steps {
			steps[i] = step
			updateTaskRunProvenance(taskRun, step.Name, i, nil, stepStatusIndex) // create StepState with nil provenance
		}
		return steps, nil
	}
	// Phase 1: Concurrently resolve all StepActions
	stepRefConcurrencyLimit := config.FromContextOrDefaults(ctx).Defaults.DefaultStepRefConcurrencyLimit
	g, ctx := errgroup.WithContext(ctx)
	// This limit prevents overwhelming the API server or remote git servers
	g.SetLimit(stepRefConcurrencyLimit)
	stepRefResolutions := make([]*stepRefResolution, len(taskSpec.Steps))
	for i, step := range taskSpec.Steps {
		if step.Ref == nil { // Only process steps with a Ref
			continue
		}
		// NOTE(review): the closure captures i and step directly; this relies on
		// Go 1.22+ per-iteration loop variable semantics — confirm go.mod
		// requires >= 1.22.
		g.Go(func() error {
			resolvedStep, source, err := resolveStepRef(ctx, taskSpec, taskRun, tekton, k8s, requester, &step)
			if err != nil {
				return fmt.Errorf("failed to resolve step ref for step %q (index %d): %w", step.Name, i, err)
			}
			// Each goroutine writes only its own slice index, so no locking is needed.
			stepRefResolutions[i] = &stepRefResolution{resolvedStep: resolvedStep, source: source}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	// Phase 2: Sequentially merge results into the final step list and update status
	for i, step := range taskSpec.Steps {
		if step.Ref == nil {
			steps[i] = step
			updateTaskRunProvenance(taskRun, step.Name, i, nil, stepStatusIndex) // create StepState for inline step with nil provenance
			continue
		}
		stepRefResolution := stepRefResolutions[i]
		steps[i] = *stepRefResolution.resolvedStep
		updateTaskRunProvenance(taskRun, stepRefResolution.resolvedStep.Name, i, stepRefResolution.source, stepStatusIndex)
	}
	return steps, nil
}
package resources
import (
"fmt"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/substitution"
"k8s.io/apimachinery/pkg/util/sets"
)
// ValidateParamArrayIndex checks every indexing reference into an array param
// found in the TaskSpec against the length of the corresponding array param
// value, returning an error for any out-of-bounds reference — e.g. a
// reference $(params.array-param[2]) when the array param has length 2.
// - `params` are params from taskrun.
// - `ts` contains params declarations and references to array params.
func ValidateParamArrayIndex(ts *v1.TaskSpec, params v1.Params) error {
	refs := ts.GetIndexingReferencesToArrayParams()
	return ValidateOutOfBoundArrayParams(ts.Params, params, refs)
}
// ValidateOutOfBoundArrayParams returns an error if the array indexing params are out of bounds,
// based on the param declarations, the parameters passed in at runtime, and the indexing references
// to array params from a task or pipeline spec.
// Example of arrayIndexingReferences: ["$(params.a-array-param[1])", "$(params.b-array-param[2])"]
func ValidateOutOfBoundArrayParams(declarations v1.ParamSpecs, params v1.Params, arrayIndexingReferences sets.String) error {
	// Start from the lengths of the declared defaults, then let the runtime
	// param values override them.
	lengths := declarations.ExtractDefaultParamArrayLengths()
	for name, l := range params.ExtractParamArrayLengths() {
		lengths[name] = l
	}
	invalid := sets.String{}
	for ref := range arrayIndexingReferences {
		idx, _ := substitution.ExtractIndex(substitution.ExtractIndexString(ref))
		// Extract the param name from the reference,
		// e.g. $(params.a-array-param[1]) -> a-array-param
		names, _, _ := substitution.ExtractVariablesFromString(substitution.TrimArrayIndex(ref), "params")
		if length, ok := lengths[names[0]]; ok && idx >= length {
			invalid.Insert(ref)
		}
	}
	if invalid.Len() > 0 {
		return pipelineErrors.WrapUserError(fmt.Errorf("non-existent param references:%v", invalid.List()))
	}
	return nil
}
// validateStepHasStepActionParameters verifies that the params a Step passes
// to its StepAction line up with the StepAction's declarations: no params may
// be passed that the StepAction does not declare, and every declared param
// without a default must be provided by the Step.
func validateStepHasStepActionParameters(stepParams v1.Params, stepActionDefaults []v1.ParamSpec) error {
	declared := sets.String{}
	var required []string
	for _, spec := range stepActionDefaults {
		declared.Insert(spec.Name)
		// A declaration without a default must be supplied by the Step.
		if spec.Default == nil {
			required = append(required, spec.Name)
		}
	}

	provided := sets.String{}
	extra := []string{}
	for _, p := range stepParams {
		provided.Insert(p.Name)
		if !declared.Has(p.Name) {
			// Extra parameter that is not needed
			extra = append(extra, p.Name)
		}
	}
	if len(extra) > 0 {
		return fmt.Errorf("extra params passed by Step to StepAction: %v", extra)
	}

	missing := []string{}
	for _, name := range required {
		if !provided.Has(name) {
			// Missing required param
			missing = append(missing, name)
		}
	}
	if len(missing) > 0 {
		return fmt.Errorf("non-existent params in Step: %v", missing)
	}
	return nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskrun
import (
"context"
"errors"
"fmt"
"reflect"
"slices"
"strings"
"time"
"github.com/tektoncd/pipeline/internal/sidecarlogresults"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun"
listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
alphalisters "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
ctrl "github.com/tektoncd/pipeline/pkg/controller"
"github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
"github.com/tektoncd/pipeline/pkg/internal/computeresources"
"github.com/tektoncd/pipeline/pkg/internal/defaultresourcerequirements"
resolutionutil "github.com/tektoncd/pipeline/pkg/internal/resolution"
podconvert "github.com/tektoncd/pipeline/pkg/pod"
tknreconciler "github.com/tektoncd/pipeline/pkg/reconciler"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
"github.com/tektoncd/pipeline/pkg/reconciler/events"
"github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
"github.com/tektoncd/pipeline/pkg/remote"
resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/spire"
"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
"github.com/tektoncd/pipeline/pkg/trustedresources"
"github.com/tektoncd/pipeline/pkg/workspace"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
corev1Listers "k8s.io/client-go/listers/core/v1"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
"knative.dev/pkg/changeset"
"knative.dev/pkg/controller"
"knative.dev/pkg/kmap"
"knative.dev/pkg/kmeta"
"knative.dev/pkg/logging"
pkgreconciler "knative.dev/pkg/reconciler"
"sigs.k8s.io/yaml"
)
// Reconciler implements controller.Reconciler for Configuration resources.
type Reconciler struct {
	// KubeClientSet accesses core Kubernetes resources (pods, discovery, ...).
	KubeClientSet kubernetes.Interface
	// PipelineClientSet accesses Tekton pipeline resources.
	PipelineClientSet clientset.Interface
	// Images holds controller-level container images (e.g. the nop image used
	// to stop sidecars).
	Images pipeline.Images
	// Clock is an injectable clock used for timeout and duration computations.
	Clock clock.PassiveClock
	// listers index properties about resources
	spireClient   spire.ControllerAPIClient
	taskRunLister listers.TaskRunLister
	limitrangeLister corev1Listers.LimitRangeLister
	// podLister is used to fetch/list the pods backing TaskRuns.
	podLister corev1Listers.PodLister
	// verificationPolicyLister lists VerificationPolicies for trusted-resource checks in prepare.
	verificationPolicyLister alphalisters.VerificationPolicyLister
	// cloudEventClient is attached to the context so events.Emit can send cloud events.
	cloudEventClient cloudevent.CEClient
	entrypointCache  podconvert.EntrypointCache
	// metrics records duration/count metrics for completed TaskRuns.
	metrics *taskrunmetrics.Recorder
	// pvcHandler creates PVCs from volumeClaimTemplate workspaces.
	pvcHandler volumeclaim.PvcHandler
	// resolutionRequester performs remote resolution of Tasks and StepActions.
	resolutionRequester resolution.Requester
	// tracerProvider emits OpenTelemetry spans for each reconcile phase.
	tracerProvider trace.TracerProvider
}
// ImagePullBackOff is the container waiting reason reported when image pulls
// are being retried with back-off; it gets special grace-period handling in
// checkPodFailed.
const ImagePullBackOff = "ImagePullBackOff"

var (
	// Check that our Reconciler implements taskrunreconciler.Interface
	_ taskrunreconciler.Interface = (*Reconciler)(nil)
	// Pod failure reasons that trigger failure of the TaskRun
	podFailureReasons = map[string]struct{}{
		ImagePullBackOff:   {},
		"InvalidImageName": {},
	}
)
// ReconcileKind compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Task Run
// resource with the current status of the resource.
//
// The flow is: initialize conditions on first reconcile, handle terminal
// states (done / cancelled / timed out / failed pod), then prepare + reconcile
// the run and finally emit events and requeue until the timeout elapses.
func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1.TaskRun) pkgreconciler.Event {
	logger := logging.FromContext(ctx)
	ctx = cloudevent.ToContext(ctx, c.cloudEventClient)
	ctx = initTracing(ctx, c.tracerProvider, tr)
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "TaskRun:ReconcileKind")
	defer span.End()
	span.SetAttributes(attribute.String("taskrun", tr.Name), attribute.String("namespace", tr.Namespace))
	// Read the initial condition
	before := tr.Status.GetCondition(apis.ConditionSucceeded)
	// Record the duration and count after the reconcile cycle.
	defer c.durationAndCountMetrics(ctx, tr, before)
	// If the TaskRun is just starting, this will also set the starttime,
	// from which the timeout will immediately begin counting down.
	if !tr.HasStarted() {
		tr.Status.InitializeConditions()
		// In case node time was not synchronized, when controller has been scheduled to other nodes.
		if tr.Status.StartTime.Sub(tr.CreationTimestamp.Time) < 0 {
			logger.Warnf("TaskRun %s createTimestamp %s is after the taskRun started %s", tr.GetNamespacedName().String(), tr.CreationTimestamp, tr.Status.StartTime)
			tr.Status.StartTime = &tr.CreationTimestamp
		}
		// Emit events. During the first reconcile the status of the TaskRun may change twice
		// from not Started to Started and then to Running, so we need to sent the event here
		// and at the end of 'Reconcile' again.
		// We also want to send the "Started" event as soon as possible for anyone who may be waiting
		// on the event to perform user facing initialisations, such has reset a CI check status
		afterCondition := tr.Status.GetCondition(apis.ConditionSucceeded)
		events.Emit(ctx, nil, afterCondition, tr)
	}
	// If the TaskRun is complete, run some post run fixtures when applicable
	if tr.IsDone() {
		logger.Infof("taskrun done : %s \n", tr.Name)
		// We may be reading a version of the object that was stored at an older version
		// and may not have had all of the assumed default specified.
		tr.SetDefaults(ctx)
		// Prefer Kubernetes native sidecar handling when the feature flag is
		// enabled and the server version supports it; otherwise fall back to
		// Tekton's own sidecar-stopping mechanism.
		useTektonSidecar := true
		if config.FromContextOrDefaults(ctx).FeatureFlags.EnableKubernetesSidecar {
			dc := c.KubeClientSet.Discovery()
			sv, err := dc.ServerVersion()
			if err != nil {
				return err
			}
			if podconvert.IsNativeSidecarSupport(sv) {
				useTektonSidecar = false
				logger.Infof("Using Kubernetes Native Sidecars \n")
			}
		}
		if useTektonSidecar {
			if err := c.stopSidecars(ctx, tr); err != nil {
				return err
			}
		}
		return c.finishReconcileUpdateEmitEvents(ctx, tr, before, nil)
	}
	// If the TaskRun is cancelled, kill resources and update status
	if tr.IsCancelled() {
		message := fmt.Sprintf("TaskRun %q was cancelled. %s", tr.Name, tr.Spec.StatusMessage)
		err := c.failTaskRun(ctx, tr, v1.TaskRunReasonCancelled, message)
		return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err)
	}
	// Check if the TaskRun has timed out; if it is, this will set its status
	// accordingly.
	if tr.HasTimedOut(ctx, c.Clock) {
		// Before failing the TaskRun, ensure step statuses are populated from the pod
		// This prevents a race condition where the timeout occurs before pod status is fetched
		if err := c.updateStepStatusesFromPod(ctx, tr); err != nil {
			logger.Warnf("Failed to update step statuses from pod before timeout: %v", err)
		}
		message := fmt.Sprintf("TaskRun %q failed to finish within %q", tr.Name, tr.GetTimeout(ctx))
		err := c.failTaskRun(ctx, tr, v1.TaskRunReasonTimedOut, message)
		return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err)
	}
	// Check for Pod Failures
	if failed, reason, message := c.checkPodFailed(ctx, tr); failed {
		err := c.failTaskRun(ctx, tr, reason, message)
		return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err)
	}
	// prepare fetches all required resources, validates them together with the
	// taskrun, runs API conversions. In case of error we update, emit events and return.
	_, rtr, err := c.prepare(ctx, tr)
	if err != nil {
		logger.Errorf("TaskRun prepare error: %v", err.Error())
		// We only return an error if update failed, otherwise we don't want to
		// reconcile an invalid TaskRun anymore
		span.SetStatus(codes.Error, "taskrun prepare error")
		span.RecordError(err)
		return c.finishReconcileUpdateEmitEvents(ctx, tr, nil, err)
	}
	// Store the condition before reconcile
	before = tr.Status.GetCondition(apis.ConditionSucceeded)
	// Reconcile this copy of the task run and then write back any status
	// updates regardless of whether the reconciliation errored out.
	if err = c.reconcile(ctx, tr, rtr); err != nil {
		logger.Errorf("Reconcile: %v", err.Error())
		// Oversized results fail the run immediately rather than being retried.
		if errors.Is(err, sidecarlogresults.ErrSizeExceeded) {
			cfg := config.FromContextOrDefaults(ctx)
			message := fmt.Sprintf("%s TaskRun \"%q\" failed: results exceeded size limit %d bytes", pipelineErrors.UserErrorLabel, tr.Name, cfg.FeatureFlags.MaxResultSize)
			err := c.failTaskRun(ctx, tr, v1.TaskRunReasonResultLargerThanAllowedLimit, message)
			return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err)
		}
	}
	// Emit events (only when ConditionSucceeded was changed)
	if err = c.finishReconcileUpdateEmitEvents(ctx, tr, before, err); err != nil {
		return err
	}
	if tr.Status.StartTime != nil {
		// Compute the time since the task started.
		elapsed := c.Clock.Since(tr.Status.StartTime.Time)
		// Snooze this resource until the timeout has elapsed.
		timeout := tr.GetTimeout(ctx)
		waitTime := timeout - elapsed
		if timeout == config.NoTimeoutDuration {
			waitTime = time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute
		}
		return controller.NewRequeueAfter(waitTime)
	}
	return nil
}
// checkPodFailed inspects the step and sidecar statuses of the TaskRun for
// container waiting reasons (see podFailureReasons) that should fail the
// whole TaskRun. It returns (true, reason, message) when the TaskRun should
// be failed, and (false, "", "") otherwise.
//
// For ImagePullBackOff an optional grace period (DefaultImagePullBackOffTimeout)
// applies: while the backing pod has been initialized / ready-to-start for less
// than that duration, the whole check returns (false, "", "") so the kubelet
// can keep retrying the pull.
func (c *Reconciler) checkPodFailed(ctx context.Context, tr *v1.TaskRun) (bool, v1.TaskRunReason, string) {
	// Steps and sidecars share identical failure handling; only the label in
	// the user-facing message differs, so both loops delegate to one helper.
	for _, step := range tr.Status.Steps {
		if done, failed, reason, msg := c.checkContainerFailed(ctx, tr, "step", step.Name, step.ImageID, step.Waiting); done {
			return failed, reason, msg
		}
	}
	for _, sidecar := range tr.Status.Sidecars {
		if done, failed, reason, msg := c.checkContainerFailed(ctx, tr, "sidecar", sidecar.Name, sidecar.ImageID, sidecar.Waiting); done {
			return failed, reason, msg
		}
	}
	return false, "", ""
}

// checkContainerFailed evaluates a single container's waiting state.
// kind is "step" or "sidecar" (used only in messages); name and imageID come
// from the container's status.
//
// The first return value ("done") tells the caller to stop scanning and
// return (failed, reason, message) immediately: done with failed=false means
// the container is in ImagePullBackOff but still within the configured grace
// period, which intentionally short-circuits the whole pod check.
func (c *Reconciler) checkContainerFailed(ctx context.Context, tr *v1.TaskRun, kind, name, imageID string, waiting *corev1.ContainerStateWaiting) (done bool, failed bool, reason v1.TaskRunReason, message string) {
	if waiting == nil {
		return false, false, "", ""
	}
	if _, found := podFailureReasons[waiting.Reason]; !found {
		return false, false, "", ""
	}
	if waiting.Reason == ImagePullBackOff {
		imagePullBackOffTimeOut := config.FromContextOrDefaults(ctx).Defaults.DefaultImagePullBackOffTimeout
		// only attempt to recover from the imagePullBackOff if specified
		if imagePullBackOffTimeOut.Seconds() != 0 {
			p, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{})
			if err != nil {
				msg := fmt.Sprintf(`the %s %q in TaskRun %q failed to pull the image %q and the pod with error: "%s."`, kind, name, tr.Name, imageID, err)
				return true, true, v1.TaskRunReasonImagePullFailed, msg
			}
			// Pod conditions that carry the time from which the back-off
			// grace period is measured.
			imagePullBackOffTimeoutPodConditions := []string{string(corev1.PodInitialized), "PodReadyToStartContainers"}
			for _, condition := range p.Status.Conditions {
				// check the pod condition to get the time when the pod was ready to start containers / initialized.
				// keep trying until the pod schedule time has exceeded the specified imagePullBackOff timeout duration
				if slices.Contains(imagePullBackOffTimeoutPodConditions, string(condition.Type)) {
					if c.Clock.Since(condition.LastTransitionTime.Time) < imagePullBackOffTimeOut {
						return true, false, "", ""
					}
				}
			}
		}
	}
	msg := fmt.Sprintf(`the %s %q in TaskRun %q failed to pull the image %q. The pod errored with the message: "%s."`, kind, name, tr.Name, imageID, waiting.Message)
	return true, true, v1.TaskRunReasonImagePullFailed, msg
}
// durationAndCountMetrics records duration and count metrics for the TaskRun
// once it is done; it is a no-op while the run is still in progress.
func (c *Reconciler) durationAndCountMetrics(ctx context.Context, tr *v1.TaskRun, beforeCondition *apis.Condition) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "durationAndCountMetrics")
	defer span.End()
	if !tr.IsDone() {
		return
	}
	if err := c.metrics.DurationAndCount(ctx, tr, beforeCondition); err != nil {
		logging.FromContext(ctx).Warnf("Failed to log the duration and count of taskruns : %v", err)
	}
}
// stopSidecars stops any sidecars still running in the completed TaskRun's
// pod (by swapping in the nop image) and syncs sidecar statuses back onto the
// TaskRun. Pod-not-found is treated as permanent, concurrent-modification as
// a transient requeue, and any other failure marks the TaskRun failed.
func (c *Reconciler) stopSidecars(ctx context.Context, tr *v1.TaskRun) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "stopSidecars")
	defer span.End()
	logger := logging.FromContext(ctx)

	// Nothing to do without an associated pod.
	if tr.Status.PodName == "" {
		return nil
	}
	// Cancellation and timeout already deleted the pod in failTaskRun, so
	// there are no sidecars left to stop.
	if condition := tr.Status.GetCondition(apis.ConditionSucceeded); condition != nil {
		switch v1.TaskRunReason(condition.Reason) {
		case v1.TaskRunReasonCancelled, v1.TaskRunReasonTimedOut:
			return nil
		}
	}

	pod, err := podconvert.StopSidecars(ctx, c.Images.NopImage, c.KubeClientSet, tr.Namespace, tr.Status.PodName)
	if err == nil && podconvert.IsSidecarStatusRunning(tr) {
		// Sidecars were stopped but some SidecarStatuses still show Running:
		// refresh them from the pod's ContainerStatuses.
		err = updateStoppedSidecarStatus(pod, tr)
	}
	switch {
	case k8serrors.IsNotFound(err):
		// At this stage the TaskRun has been completed; if the pod is not
		// found it won't come back (probably evicted) — treat as permanent.
		return controller.NewPermanentError(err)
	case err != nil:
		// Concurrent-modification errors while stopping sidecars are
		// admissible; just let the reconciler requeue instead of failing.
		if isConcurrentModificationError(err) {
			return controller.NewRequeueAfter(time.Second)
		}
		logger.Errorf("Error stopping sidecars for TaskRun %q: %v", tr.Name, err)
		tr.Status.MarkResourceFailed(v1.TaskRunReasonStopSidecarFailed, err)
	}
	return nil
}
// finishReconcileUpdateEmitEvents wraps up a reconcile pass: it schedules a
// retry for a failed-but-retriable TaskRun, emits k8s/cloud events for the
// condition transition, syncs labels/annotations while the run is (or was)
// in progress, and joins any update error with previousError — preserving
// permanence when previousError was permanent.
func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, tr *v1.TaskRun, beforeCondition *apis.Condition, previousError error) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "finishReconcileUpdateEmitEvents")
	defer span.End()
	logger := logging.FromContext(ctx)

	after := tr.Status.GetCondition(apis.ConditionSucceeded)
	// A failed, non-cancelled, retriable run gets another attempt before we report.
	if after.IsFalse() && !tr.IsCancelled() && tr.IsRetriable() {
		retryTaskRun(tr, after.Message)
		after = tr.Status.GetCondition(apis.ConditionSucceeded)
	}
	// Send k8s events and cloud events (when configured)
	events.Emit(ctx, beforeCondition, after, tr)

	errs := []error{previousError}
	// Labels and annotations only need updating when the run was still in
	// progress before or after this cycle; a run that was already complete
	// and remains complete is left untouched.
	if after.IsUnknown() || beforeCondition.IsUnknown() {
		if _, err := c.updateLabelsAndAnnotations(ctx, tr); err != nil {
			logger.Warn("Failed to update TaskRun labels/annotations", zap.Error(err))
			events.EmitError(controller.GetEventRecorder(ctx), err, tr)
			errs = append(errs, err)
		}
	}
	joined := errors.Join(errs...)
	if controller.IsPermanentError(previousError) {
		return controller.NewPermanentError(joined)
	}
	return joined
}
// `prepare` fetches resources the taskrun depends on, runs validation and conversion
// It may report errors back to Reconcile, it updates the taskrun status in case of
// error but it does not sync updates back to etcd. It does not emit events.
// All errors returned by `prepare` are always handled by `Reconcile`, so they don't cause
// the key to be re-queued directly.
// `prepare` returns spec and resources. In future we might store
// them in the TaskRun.Status so we don't need to re-run `prepare` at every
// reconcile (see https://github.com/tektoncd/pipeline/issues/2473).
func (c *Reconciler) prepare(ctx context.Context, tr *v1.TaskRun) (*v1.TaskSpec, *resources.ResolvedTask, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "prepare")
	defer span.End()
	logger := logging.FromContext(ctx)
	tr.SetDefaults(ctx)
	// list VerificationPolicies for trusted resources
	vp, err := c.verificationPolicyLister.VerificationPolicies(tr.Namespace).List(labels.Everything())
	if err != nil {
		return nil, nil, fmt.Errorf("failed to list VerificationPolicies from namespace %s with error %w", tr.Namespace, err)
	}
	getTaskfunc := resources.GetTaskFuncFromTaskRun(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, tr, vp)
	// Resolve the referenced (possibly remote) Task, mapping each class of
	// resolution error onto the corresponding TaskRun condition.
	taskMeta, taskSpec, err := resources.GetTaskData(ctx, tr, getTaskfunc)
	switch {
	case errors.Is(err, remote.ErrRequestInProgress):
		// Remote resolution is still in flight: mark ongoing and retry.
		message := fmt.Sprintf("TaskRun %s/%s awaiting remote resource", tr.Namespace, tr.Name)
		tr.Status.MarkResourceOngoing(v1.TaskRunReasonResolvingTaskRef, message)
		return nil, nil, err
	case errors.Is(err, apiserver.ErrReferencedObjectValidationFailed), errors.Is(err, apiserver.ErrCouldntValidateObjectPermanent):
		tr.Status.MarkResourceFailed(v1.TaskRunReasonTaskFailedValidation, err)
		return nil, nil, controller.NewPermanentError(err)
	case errors.Is(err, apiserver.ErrCouldntValidateObjectRetryable):
		return nil, nil, err
	case err != nil:
		logger.Errorf("Failed to determine Task spec to use for taskrun %s: %v", tr.Name, err)
		// Transient resolution errors are retried; anything else is permanent.
		if resolutioncommon.IsErrTransient(err) {
			return nil, nil, err
		}
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedResolution, err)
		return nil, nil, controller.NewPermanentError(err)
	default:
		// Store the fetched TaskSpec on the TaskRun for auditing
		if err := storeTaskSpecAndMergeMeta(ctx, tr, taskSpec, taskMeta); err != nil {
			logger.Errorf("Failed to store TaskSpec on TaskRun.Status for taskrun %s: %v", tr.Name, err)
		}
	}
	// Resolve any StepActions referenced by the Task's steps; the error
	// handling mirrors the Task resolution switch above.
	steps, err := resources.GetStepActionsData(ctx, *taskSpec, tr, c.PipelineClientSet, c.KubeClientSet, c.resolutionRequester)
	switch {
	case errors.Is(err, remote.ErrRequestInProgress):
		message := fmt.Sprintf("TaskRun %s/%s awaiting remote StepAction", tr.Namespace, tr.Name)
		tr.Status.MarkResourceOngoing(v1.TaskRunReasonResolvingStepActionRef, message)
		return nil, nil, err
	case errors.Is(err, apiserver.ErrReferencedObjectValidationFailed), errors.Is(err, apiserver.ErrCouldntValidateObjectPermanent):
		tr.Status.MarkResourceFailed(v1.TaskRunReasonTaskFailedValidation, err)
		return nil, nil, controller.NewPermanentError(err)
	case errors.Is(err, apiserver.ErrCouldntValidateObjectRetryable):
		return nil, nil, err
	case err != nil:
		logger.Errorf("Failed to determine StepAction to use for TaskRun %s: %v", tr.Name, err)
		if resolutioncommon.IsErrTransient(err) {
			return nil, nil, err
		}
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedResolution, err)
		return nil, nil, controller.NewPermanentError(err)
	default:
		// Store the fetched StepActions to TaskSpec, and update the stored TaskSpec again
		taskSpec.Steps = steps
		if err := storeTaskSpecAndMergeMeta(ctx, tr, taskSpec, taskMeta); err != nil {
			logger.Errorf("Failed to store TaskSpec on TaskRun.Status for taskrun %s: %v", tr.Name, err)
		}
	}
	// Surface the trusted-resources verification outcome as a condition on
	// the TaskRun; only VerificationError fails the run.
	if taskMeta.VerificationResult != nil {
		switch taskMeta.VerificationResult.VerificationResultType {
		case trustedresources.VerificationError:
			logger.Errorf("TaskRun %s/%s referred task failed signature verification", tr.Namespace, tr.Name)
			tr.Status.MarkResourceFailed(v1.TaskRunReasonResourceVerificationFailed, taskMeta.VerificationResult.Err)
			tr.Status.SetCondition(&apis.Condition{
				Type:    trustedresources.ConditionTrustedResourcesVerified,
				Status:  corev1.ConditionFalse,
				Message: taskMeta.VerificationResult.Err.Error(),
			})
			return nil, nil, controller.NewPermanentError(taskMeta.VerificationResult.Err)
		case trustedresources.VerificationSkip:
			// do nothing
		case trustedresources.VerificationWarn:
			tr.Status.SetCondition(&apis.Condition{
				Type:    trustedresources.ConditionTrustedResourcesVerified,
				Status:  corev1.ConditionFalse,
				Message: taskMeta.VerificationResult.Err.Error(),
			})
		case trustedresources.VerificationPass:
			tr.Status.SetCondition(&apis.Condition{
				Type:   trustedresources.ConditionTrustedResourcesVerified,
				Status: corev1.ConditionTrue,
			})
		}
	}
	rtr := &resources.ResolvedTask{
		TaskName: taskMeta.Name,
		TaskSpec: taskSpec,
		Kind:     resources.GetTaskKind(tr),
	}
	// Validate the resolved spec against the TaskRun. Every validation failure
	// below is permanent: re-reconciling the same inputs cannot fix it.
	if err := validateTaskSpecRequestResources(taskSpec); err != nil {
		logger.Errorf("TaskRun %s taskSpec request resources are invalid: %v", tr.Name, err)
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
		return nil, nil, controller.NewPermanentError(err)
	}
	if err := ValidateResolvedTask(ctx, tr.Spec.Params, &v1.Matrix{}, rtr); err != nil {
		logger.Errorf("TaskRun %q resources are invalid: %v", tr.Name, err)
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
		return nil, nil, controller.NewPermanentError(err)
	}
	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableParamEnum {
		if err := ValidateEnumParam(ctx, tr.Spec.Params, rtr.TaskSpec.Params); err != nil {
			logger.Errorf("TaskRun %q Param Enum validation failed: %v", tr.Name, err)
			tr.Status.MarkResourceFailed(v1.TaskRunReasonInvalidParamValue, err)
			return nil, nil, controller.NewPermanentError(err)
		}
	}
	if err := resources.ValidateParamArrayIndex(rtr.TaskSpec, tr.Spec.Params); err != nil {
		logger.Errorf("TaskRun %q Param references are invalid: %v", tr.Name, err)
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
		return nil, nil, controller.NewPermanentError(err)
	}
	if err := c.updateTaskRunWithDefaultWorkspaces(ctx, tr, taskSpec); err != nil {
		logger.Errorf("Failed to update taskrun %s with default workspace: %v", tr.Name, err)
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedResolution, err)
		return nil, nil, controller.NewPermanentError(err)
	}
	var workspaceDeclarations []v1.WorkspaceDeclaration
	// Propagating workspaces allows users to skip declarations
	// In order to validate the workspace bindings we create declarations based on
	// the workspaces provided in the task run spec. We only allow this feature for embedded taskSpec.
	if tr.Spec.TaskSpec != nil {
		for _, ws := range tr.Spec.Workspaces {
			wspaceDeclaration := v1.WorkspaceDeclaration{Name: ws.Name}
			workspaceDeclarations = append(workspaceDeclarations, wspaceDeclaration)
		}
		workspaceDeclarations = append(workspaceDeclarations, taskSpec.Workspaces...)
	} else {
		workspaceDeclarations = taskSpec.Workspaces
	}
	if err := workspace.ValidateBindings(ctx, workspaceDeclarations, tr.Spec.Workspaces); err != nil {
		logger.Errorf("TaskRun %q workspaces are invalid: %v", tr.Name, err)
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
		return nil, nil, controller.NewPermanentError(err)
	}
	aaBehavior, err := affinityassistant.GetAffinityAssistantBehavior(ctx)
	if err != nil {
		return nil, nil, controller.NewPermanentError(err)
	}
	if aaBehavior == affinityassistant.AffinityAssistantPerWorkspace {
		if err := workspace.ValidateOnlyOnePVCIsUsed(tr.Spec.Workspaces); err != nil {
			logger.Errorf("TaskRun %q workspaces incompatible with Affinity Assistant: %v", tr.Name, err)
			tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
			return nil, nil, controller.NewPermanentError(err)
		}
	}
	if err := validateOverrides(taskSpec, &tr.Spec); err != nil {
		logger.Errorf("TaskRun %q step or sidecar overrides are invalid: %v", tr.Name, err)
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
		return nil, nil, controller.NewPermanentError(err)
	}
	return taskSpec, rtr, nil
}
// `reconcile` creates the Pod associated to the TaskRun, and it pulls back status
// updates from the Pod to the TaskRun.
// It reports errors back to Reconcile, it updates the taskrun status in case of
// error but it does not sync updates back to etcd. It does not emit events.
// `reconcile` consumes spec and resources returned by `prepare`
func (c *Reconciler) reconcile(ctx context.Context, tr *v1.TaskRun, rtr *resources.ResolvedTask) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "reconcile")
	defer span.End()
	logger := logging.FromContext(ctx)
	recorder := controller.GetEventRecorder(ctx)
	var err error
	// Get the TaskRun's Pod if it should have one. Otherwise, create the Pod.
	var pod *corev1.Pod
	if tr.Status.PodName != "" {
		pod, err = c.podLister.Pods(tr.Namespace).Get(tr.Status.PodName)
		if k8serrors.IsNotFound(err) {
			// Keep going, this will result in the Pod being created below.
		} else if err != nil {
			// This is considered a transient error, so we return error, do not update
			// the task run condition, and return an error which will cause this key to
			// be requeued for reconcile.
			logger.Errorf("Error getting pod %q: %v", tr.Status.PodName, err)
			return err
		}
	} else {
		// List pods that have a label with this TaskRun name. Do not include other labels from the
		// TaskRun in this selector. The user could change them during the lifetime of the TaskRun so the
		// current labels may not be set on a previously created Pod.
		labelSelector := labels.Set{pipeline.TaskRunLabelKey: tr.Name}
		pos, err := c.podLister.Pods(tr.Namespace).List(labelSelector.AsSelector())
		if err != nil {
			logger.Errorf("Error listing pods: %v", err)
			return err
		}
		// Adopt a Pod that is owned by this TaskRun and is still viable
		// (has not failed and has not been archived by an earlier retry).
		for index := range pos {
			po := pos[index]
			if metav1.IsControlledBy(po, tr) && !podconvert.DidTaskRunFail(po) && !podconvert.IsPodArchived(po, &tr.Status) {
				pod = po
			}
		}
	}
	// Please note that this block is required to run before `applyParamsContextsResultsAndWorkspaces` is called the first time,
	// and that `applyParamsContextsResultsAndWorkspaces` _must_ be called on every reconcile.
	if pod == nil && tr.HasVolumeClaimTemplate() {
		for _, ws := range tr.Spec.Workspaces {
			if err := c.pvcHandler.CreatePVCFromVolumeClaimTemplate(ctx, ws, *kmeta.NewControllerRef(tr), tr.Namespace); err != nil {
				logger.Errorf("Failed to create PVC for TaskRun %s: %v", tr.Name, err)
				tr.Status.MarkResourceFailed(volumeclaim.ReasonCouldntCreateWorkspacePVC,
					fmt.Errorf("failed to create PVC for TaskRun %s workspaces correctly: %w",
						fmt.Sprintf("%s/%s", tr.Namespace, tr.Name), err))
				return controller.NewPermanentError(err)
			}
		}
		// Rewrite volumeClaimTemplate bindings into concrete PVC bindings.
		taskRunWorkspaces := applyVolumeClaimTemplates(tr.Spec.Workspaces, *kmeta.NewControllerRef(tr))
		// This is used by createPod below. Changes to the Spec are not updated.
		tr.Spec.Workspaces = taskRunWorkspaces
	}
	resources.ApplyParametersToWorkspaceBindings(rtr.TaskSpec, tr)
	// Get the randomized volume names assigned to workspace bindings
	workspaceVolumes := workspace.CreateVolumes(tr.Spec.Workspaces)
	ts, err := applyParamsContextsResultsAndWorkspaces(ctx, tr, rtr, workspaceVolumes)
	if err != nil {
		logger.Errorf("Error updating task spec parameters, contexts, results and workspaces: %s", err)
		return err
	}
	tr.Status.TaskSpec = ts
	if len(tr.Status.TaskSpec.Steps) > 0 {
		logger.Debugf("set taskspec for %s/%s - script: %s", tr.Namespace, tr.Name, tr.Status.TaskSpec.Steps[0].Script)
	}
	if pod == nil {
		pod, err = c.createPod(ctx, ts, tr, rtr, workspaceVolumes)
		if err != nil {
			newErr := c.handlePodCreationError(tr, err)
			logger.Errorf("Failed to create task run pod for taskrun %q: %v", tr.Name, newErr)
			return newErr
		}
	}
	if podconvert.IsPodExceedingNodeResources(pod) {
		recorder.Eventf(tr, corev1.EventTypeWarning, podconvert.ReasonExceededNodeResources, "Insufficient resources to schedule pod %q", pod.Name)
	}
	if podconvert.SidecarsReady(pod.Status) {
		// All sidecars are up: flag the Pod as ready so the steps can start.
		if err := podconvert.UpdateReady(ctx, c.KubeClientSet, *pod); err != nil {
			return err
		}
		if err := c.metrics.RecordPodLatency(ctx, pod, tr); err != nil {
			// Metrics failures are logged but never fail the reconcile.
			logger.Warnf("Failed to log the metrics : %v", err)
		}
	}
	// Convert the Pod's status to the equivalent TaskRun Status.
	tr.Status, err = podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod, c.KubeClientSet, rtr.TaskSpec)
	if err != nil {
		return err
	}
	if err := validateTaskRunResults(tr, rtr.TaskSpec); err != nil {
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
		return err
	}
	logger.Infof("Successfully reconciled taskrun %s/%s with status: %#v", tr.Name, tr.Namespace, tr.Status.GetCondition(apis.ConditionSucceeded))
	return nil
}
// updateTaskRunWithDefaultWorkspaces applies the default workspace binding
// (from the defaults config map) to every non-optional workspace declared by
// the Task that the TaskRun does not bind explicitly. Bindings provided on
// the TaskRun always take precedence over the configured default.
func (c *Reconciler) updateTaskRunWithDefaultWorkspaces(ctx context.Context, tr *v1.TaskRun, taskSpec *v1.TaskSpec) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "updateTaskRunWithDefaultWorkspaces")
	defer span.End()
	configMap := config.FromContextOrDefaults(ctx)
	defaults := configMap.Defaults
	if defaults.DefaultTaskRunWorkspaceBinding != "" {
		var defaultWS v1.WorkspaceBinding
		if err := yaml.Unmarshal([]byte(defaults.DefaultTaskRunWorkspaceBinding), &defaultWS); err != nil {
			// Wrap the YAML error with %w so callers see why parsing failed,
			// not just the offending input (the original dropped err entirely).
			return fmt.Errorf("failed to unmarshal %v: %w", defaults.DefaultTaskRunWorkspaceBinding, err)
		}
		workspaceBindings := map[string]v1.WorkspaceBinding{}
		// Seed every required (non-optional) Task workspace with the default binding.
		for _, tsWorkspace := range taskSpec.Workspaces {
			if !tsWorkspace.Optional {
				workspaceBindings[tsWorkspace.Name] = v1.WorkspaceBinding{
					Name:                  tsWorkspace.Name,
					SubPath:               defaultWS.SubPath,
					VolumeClaimTemplate:   defaultWS.VolumeClaimTemplate,
					PersistentVolumeClaim: defaultWS.PersistentVolumeClaim,
					EmptyDir:              defaultWS.EmptyDir,
					ConfigMap:             defaultWS.ConfigMap,
					Secret:                defaultWS.Secret,
				}
			}
		}
		// TaskRun-provided bindings overwrite the defaults.
		for _, trWorkspace := range tr.Spec.Workspaces {
			workspaceBindings[trWorkspace.Name] = trWorkspace
		}
		// NOTE(review): map iteration makes the resulting order nondeterministic,
		// matching the original behavior — confirm no caller relies on ordering.
		tr.Spec.Workspaces = []v1.WorkspaceBinding{}
		for _, wsBinding := range workspaceBindings {
			tr.Spec.Workspaces = append(tr.Spec.Workspaces, wsBinding)
		}
	}
	return nil
}
// updateLabelsAndAnnotations syncs the local TaskRun's labels and annotations
// onto the cluster copy (fetched via the lister) and returns the stored TaskRun.
// It also stamps the release annotation so the TaskRun records which controller
// version processed it.
func (c *Reconciler) updateLabelsAndAnnotations(ctx context.Context, tr *v1.TaskRun) (*v1.TaskRun, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "updateLabelsAndAnnotations")
	defer span.End()
	// Ensure the TaskRun is properly decorated with the version of the Tekton controller processing it.
	if tr.Annotations == nil {
		tr.Annotations = make(map[string]string, 1)
	}
	tr.Annotations[podconvert.ReleaseAnnotation] = changeset.Get()
	latest, err := c.taskRunLister.TaskRuns(tr.Namespace).Get(tr.Name)
	if err != nil {
		return nil, fmt.Errorf("error getting TaskRun %s when updating labels/annotations: %w", tr.Name, err)
	}
	labelsInSync := reflect.DeepEqual(tr.ObjectMeta.Labels, latest.ObjectMeta.Labels)
	annotationsInSync := reflect.DeepEqual(tr.ObjectMeta.Annotations, latest.ObjectMeta.Annotations)
	if labelsInSync && annotationsInSync {
		return latest, nil
	}
	// Note that this uses Update vs. Patch because the former is significantly easier to test.
	// If we want to switch this to Patch, then we will need to teach the utilities in test/controller.go
	// to deal with Patch (setting resourceVersion, and optimistic concurrency checks).
	updated := latest.DeepCopy()
	updated.Labels = kmap.Union(updated.Labels, tr.Labels)
	updated.Annotations = kmap.Union(kmap.ExcludeKeys(updated.Annotations, tknreconciler.KubectlLastAppliedAnnotationKey), tr.Annotations)
	return c.PipelineClientSet.TektonV1().TaskRuns(tr.Namespace).Update(ctx, updated, metav1.UpdateOptions{})
}
// handlePodCreationError translates a pod-creation failure into the right
// TaskRun status update and the error Reconcile should act on. Quota-related
// problems requeue (transient); validation and admission failures mark the
// TaskRun failed. The case order is significant: more specific error shapes
// are matched first.
func (c *Reconciler) handlePodCreationError(tr *v1.TaskRun, err error) error {
	switch {
	case isResourceQuotaConflictError(err):
		// Requeue if it runs into ResourceQuotaConflictError Error i.e https://github.com/kubernetes/kubernetes/issues/67761
		tr.Status.StartTime = nil
		tr.Status.MarkResourceOngoing(podconvert.ReasonPodPending, "tried to create pod, but it failed with ResourceQuotaConflictError")
		return controller.NewRequeueAfter(time.Second)
	case isExceededResourceQuotaError(err):
		// If we are struggling to create the pod, then it hasn't started.
		tr.Status.StartTime = nil
		tr.Status.MarkResourceOngoing(podconvert.ReasonExceededResourceQuota, fmt.Sprint("TaskRun Pod exceeded available resources: ", err))
		return controller.NewRequeueAfter(time.Minute)
	case isTaskRunValidationFailed(err):
		tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedValidation, err)
	case k8serrors.IsAlreadyExists(err):
		tr.Status.MarkResourceOngoing(podconvert.ReasonPodPending, "tried to create pod, but it already exists")
	case isPodAdmissionFailed(err):
		tr.Status.MarkResourceFailed(podconvert.ReasonPodAdmissionFailed, err)
	default:
		// The pod creation failed with unknown reason. The most likely
		// reason is that something is wrong with the spec of the Task, that we could
		// not check with validation before - i.e. pod template fields
		var hint string
		if tr.Spec.TaskRef != nil {
			hint = fmt.Sprintf("missing or invalid Task %s/%s", tr.Namespace, tr.Spec.TaskRef.Name)
		} else {
			hint = "invalid TaskSpec"
		}
		err = controller.NewPermanentError(fmt.Errorf("failed to create task run pod %q: %v. Maybe %s", tr.Name, err, hint))
		tr.Status.MarkResourceFailed(podconvert.ReasonPodCreationFailed, err)
	}
	return err
}
// failTaskRun stops a TaskRun with the provided Reason
// If a pod is associated to the TaskRun, it stops it
// failTaskRun function may return an error in case the pod could not be deleted
// failTaskRun may update the local TaskRun status, but it won't push the updates to etcd
func (c *Reconciler) failTaskRun(ctx context.Context, tr *v1.TaskRun, reason v1.TaskRunReason, message string) error {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "failTaskRun")
	defer span.End()
	logger := logging.FromContext(ctx)
	logger.Warnf("stopping task run %q because of %q", tr.Name, reason)
	tr.Status.MarkResourceFailed(reason, errors.New(message))
	completionTime := metav1.Time{Time: c.Clock.Now()}
	// update tr completed time
	tr.Status.CompletionTime = &completionTime
	if tr.Status.PodName == "" {
		// No Pod was ever created for this TaskRun, so there is nothing to stop.
		logger.Warnf("task run %q has no pod running yet", tr.Name)
		return nil
	}
	// When the TaskRun is failed, we mark all running/waiting steps as failed
	// This is regardless of what happens with the Pod, which may be cancelled,
	// deleted, non existing or fail to delete
	// See https://github.com/tektoncd/pipeline/issues/8293 for more details.
	terminateStepsInPod(tr, reason)
	var err error
	if (reason == v1.TaskRunReasonCancelled || reason == v1.TaskRunReasonTimedOut) && (config.FromContextOrDefaults(ctx).FeatureFlags.EnableKeepPodOnCancel) {
		// With keep-pod-on-cancel enabled, ask the entrypoint to stop the steps
		// instead of deleting the Pod, so its logs and state remain available.
		logger.Infof("Canceling task run %q by entrypoint, Reason: %s", tr.Name, reason)
		err = podconvert.CancelPod(ctx, c.KubeClientSet, tr.Namespace, tr.Status.PodName)
	} else {
		err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Delete(ctx, tr.Status.PodName, metav1.DeleteOptions{})
	}
	if err != nil && !k8serrors.IsNotFound(err) {
		// A NotFound error is fine: the Pod is already gone, which is the goal.
		logger.Errorf("Failed to terminate pod %s: %v", tr.Status.PodName, err)
		return err
	}
	return nil
}
// updateStepStatusesFromPod fetches the pod and updates step statuses in the TaskRun.
// This is called before failing a TaskRun to ensure step statuses are populated.
// A missing pod (no name recorded, or not found) is not an error: there is
// simply nothing to update.
func (c *Reconciler) updateStepStatusesFromPod(ctx context.Context, tr *v1.TaskRun) error {
	logger := logging.FromContext(ctx)
	if tr.Status.PodName == "" {
		return nil
	}
	pod, err := c.podLister.Pods(tr.Namespace).Get(tr.Status.PodName)
	switch {
	case k8serrors.IsNotFound(err):
		// Pod doesn't exist yet; nothing to update.
		return nil
	case err != nil:
		return err
	}
	// Derive step statuses via MakeTaskRunStatus so this stays consistent
	// with the normal reconciliation path.
	status, convErr := podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod, c.KubeClientSet, tr.Status.TaskSpec)
	if convErr != nil {
		return convErr
	}
	// Copy only the Steps field to avoid clobbering other status fields.
	tr.Status.Steps = status.Steps
	return nil
}
// terminateStepsInPod updates step states for TaskRun on TaskRun object since pod has been deleted for cancel or timeout.
// Running steps keep their original start time; waiting steps use the TaskRun
// creation timestamp because startedAt cannot be null under CRD schema validation.
func terminateStepsInPod(tr *v1.TaskRun, taskRunReason v1.TaskRunReason) {
	// makeTerminated builds the terminated state shared by both branches.
	makeTerminated := func(startedAt metav1.Time, stepName string) *corev1.ContainerStateTerminated {
		return &corev1.ContainerStateTerminated{
			ExitCode:   1,
			StartedAt:  startedAt,
			FinishedAt: *tr.Status.CompletionTime,
			// TODO(#7385): replace with more pod/container termination reason instead of overloading taskRunReason
			Reason:  taskRunReason.String(),
			Message: fmt.Sprintf("Step %s terminated as pod %s is terminated", stepName, tr.Status.PodName),
		}
	}
	for i := range tr.Status.Steps {
		step := tr.Status.Steps[i]
		if step.Running != nil {
			step.Terminated = makeTerminated(step.Running.StartedAt, step.Name)
			step.TerminationReason = taskRunReason.String()
			step.Running = nil
			tr.Status.Steps[i] = step
		}
		if step.Waiting != nil {
			step.Terminated = makeTerminated(tr.CreationTimestamp, step.Name)
			step.TerminationReason = taskRunReason.String()
			step.Waiting = nil
			tr.Status.Steps[i] = step
		}
	}
}
// createPod creates a Pod based on the Task's configuration, with pvcName as a volumeMount
// TODO(dibyom): Refactor resource setup/substitution logic to its own function in the resources package
func (c *Reconciler) createPod(ctx context.Context, ts *v1.TaskSpec, tr *v1.TaskRun, rtr *resources.ResolvedTask, workspaceVolumes map[string]corev1.Volume) (*corev1.Pod, error) {
	ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "createPod")
	defer span.End()
	logger := logging.FromContext(ctx)
	// We don't want to mutate tr.Status.TaskSpec inside
	// the createPod function. It's possible that pod will
	// be killed before running and when rescheduling it
	// will cause bugs. As this function could be called
	// multiple times, we copy tr.Status.TaskSpec to help
	// in scheduling Pod.
	ts = ts.DeepCopy()
	// By this time, params and workspaces should be propagated down so we can
	// validate that all parameter variables and workspaces used in the TaskSpec are declared by the Task.
	if validateErr := v1.ValidateUsageOfDeclaredParameters(ctx, ts.Steps, ts.Params); validateErr != nil {
		logger.Errorf("Failed to create a pod for taskrun: %s due to task validation error %v", tr.Name, validateErr)
		return nil, validateErr
	}
	if validateErr := ts.Validate(ctx); validateErr != nil {
		logger.Errorf("Failed to create a pod for taskrun: %s due to task validation error %v", tr.Name, validateErr)
		return nil, validateErr
	}
	var err error
	// Bind the declared workspaces to the volumes created for them.
	ts, err = workspace.Apply(ctx, *ts, tr.Spec.Workspaces, workspaceVolumes)
	if err != nil {
		logger.Errorf("Failed to create a pod for taskrun: %s due to workspace error %v", tr.Name, err)
		return nil, err
	}
	// Apply path substitutions for the legacy credentials helper (aka "creds-init")
	ts = resources.ApplyCredentialsPath(ts, pipeline.CredsDir)
	// Apply parameter substitution to PodTemplate if it exists
	if tr.Spec.PodTemplate != nil {
		var defaults []v1.ParamSpec
		if len(ts.Params) > 0 {
			defaults = append(defaults, ts.Params...)
		}
		updatedPodTemplate := resources.ApplyPodTemplateReplacements(tr.Spec.PodTemplate, tr, defaults...)
		if updatedPodTemplate != nil {
			// Work on a copy so the caller's TaskRun is not mutated.
			trCopy := tr.DeepCopy()
			trCopy.Spec.PodTemplate = updatedPodTemplate
			tr = trCopy
		}
	}
	podbuilder := podconvert.Builder{
		Images:          c.Images,
		KubeClient:      c.KubeClientSet,
		EntrypointCache: c.entrypointCache,
	}
	pod, err := podbuilder.Build(ctx, tr, *ts,
		defaultresourcerequirements.NewTransformer(ctx),
		computeresources.NewTransformer(ctx, tr.Namespace, c.limitrangeLister),
		affinityassistant.NewTransformer(ctx, tr.Annotations),
	)
	if err != nil {
		return nil, fmt.Errorf("translating TaskSpec to Pod: %w", err)
	}
	// Stash the podname in case there's create conflict so that we can try
	// to fetch it.
	podName := pod.Name
	cfg := config.FromContextOrDefaults(ctx)
	if !cfg.FeatureFlags.EnableWaitExponentialBackoff {
		pod, err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Create(ctx, pod, metav1.CreateOptions{})
	} else {
		// Retry pod creation with exponential backoff, but only for webhook
		// timeouts; any other error aborts the backoff loop immediately.
		backoff := wait.Backoff{
			Duration: cfg.WaitExponentialBackoff.Duration, // Initial delay before retry
			Factor:   cfg.WaitExponentialBackoff.Factor,   // Multiplier for exponential growth
			Steps:    cfg.WaitExponentialBackoff.Steps,    // Maximum number of retry attempts
			Cap:      cfg.WaitExponentialBackoff.Cap,      // Maximum time spent before giving up
		}
		var result *corev1.Pod
		err = wait.ExponentialBackoff(backoff, func() (bool, error) {
			result = nil
			result, err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Create(ctx, pod, metav1.CreateOptions{})
			if err != nil {
				if ctrl.IsWebhookTimeout(err) {
					return false, nil // retry
				}
				return false, err // do not retry
			}
			pod = result
			return true, nil
		})
	}
	if err == nil && willOverwritePodSetAffinity(tr) {
		if recorder := controller.GetEventRecorder(ctx); recorder != nil {
			recorder.Eventf(tr, corev1.EventTypeWarning, "PodAffinityOverwrite", "Pod template affinity is overwritten by affinity assistant for pod %q", pod.Name)
		}
	}
	// If the pod failed to be created because it already exists, try to fetch
	// from the informer and return if successful. Otherwise, return the
	// original error.
	if err != nil && k8serrors.IsAlreadyExists(err) {
		if p, getErr := c.podLister.Pods(tr.Namespace).Get(podName); getErr == nil {
			return p, nil
		}
	}
	if err != nil {
		return nil, err
	}
	return pod, nil
}
// applyParamsContextsResultsAndWorkspaces applies parameter, context, results and workspace substitutions to the TaskSpec.
// It works on a deep copy so the resolved TaskSpec held by rtr is never mutated.
func applyParamsContextsResultsAndWorkspaces(ctx context.Context, tr *v1.TaskRun, rtr *resources.ResolvedTask, workspaceVolumes map[string]corev1.Volume) (*v1.TaskSpec, error) {
	ts := rtr.TaskSpec.DeepCopy()
	var defaults []v1.ParamSpec
	if len(ts.Params) > 0 {
		defaults = append(defaults, ts.Params...)
	}
	// Apply parameter substitution from the taskrun.
	ts = resources.ApplyParameters(ts, tr, defaults...)
	// Apply context substitution from the taskrun
	ts = resources.ApplyContexts(ts, rtr.TaskName, tr)
	// Apply task result substitution
	ts = resources.ApplyResults(ts)
	// Apply step Artifacts substitution
	ts = resources.ApplyArtifacts(ts)
	// Apply step exitCode path substitution
	ts = resources.ApplyStepExitCodePath(ts)
	// Propagate workspaces from the TaskRun down to the Task: every binding
	// whose name the Task does not already declare gets an implicit declaration.
	declared := make(map[string]struct{}, len(ts.Workspaces))
	for _, decl := range ts.Workspaces {
		declared[decl.Name] = struct{}{}
	}
	for _, binding := range tr.Spec.Workspaces {
		if _, ok := declared[binding.Name]; !ok {
			ts.Workspaces = append(ts.Workspaces, v1.WorkspaceDeclaration{Name: binding.Name})
		}
	}
	// Apply workspace resource substitution
	ts = resources.ApplyWorkspaces(ctx, ts, ts.Workspaces, tr.Spec.Workspaces, workspaceVolumes)
	return ts, nil
}
// isExceededResourceQuotaError reports whether err is a Forbidden API error
// caused by exceeding a namespace resource quota.
func isExceededResourceQuotaError(err error) bool {
	if err == nil || !k8serrors.IsForbidden(err) {
		return false
	}
	return strings.Contains(err.Error(), "exceeded quota")
}
func isTaskRunValidationFailed(err error) bool {
return err != nil && strings.Contains(err.Error(), "TaskRun validation failed")
}
// isPodAdmissionFailed reports whether err is a Forbidden API error raised by
// pod admission (Kubernetes PodSecurity or OpenShift security context constraints).
func isPodAdmissionFailed(err error) bool {
	if err == nil || !k8serrors.IsForbidden(err) {
		return false
	}
	msg := err.Error()
	return strings.Contains(msg, "violates PodSecurity") || strings.Contains(msg, "security context constraint")
}
// updateStoppedSidecarStatus updates SidecarStatus for sidecars that were
// terminated by nop image. Sidecars that were never terminated keep their
// current container state. The TaskRun's sidecar list is rebuilt from scratch.
func updateStoppedSidecarStatus(pod *corev1.Pod, tr *v1.TaskRun) error {
	tr.Status.Sidecars = []v1.SidecarState{}
	for _, cs := range pod.Status.ContainerStatuses {
		if !podconvert.IsContainerSidecar(cs.Name) {
			continue
		}
		// Default: sidecar was not terminated, keep its live state.
		state := cs.State
		if term := cs.LastTerminationState.Terminated; term != nil {
			// Sidecar was successfully terminated by the nop image.
			state = corev1.ContainerState{
				Terminated: &corev1.ContainerStateTerminated{
					ExitCode:    term.ExitCode,
					Reason:      "Completed",
					Message:     "Sidecar container successfully stopped by nop image",
					StartedAt:   term.StartedAt,
					FinishedAt:  term.FinishedAt,
					ContainerID: term.ContainerID,
				},
			}
		}
		tr.Status.Sidecars = append(tr.Status.Sidecars, v1.SidecarState{
			ContainerState: *state.DeepCopy(),
			Name:           podconvert.TrimSidecarPrefix(cs.Name),
			Container:      cs.Name,
			ImageID:        cs.ImageID,
		})
	}
	return nil
}
// applyVolumeClaimTemplates returns WorkspaceBindings where every
// volumeClaimTemplate binding is translated into a PersistentVolumeClaim
// binding whose claim name is derived from the template and owner; all other
// bindings pass through unchanged.
func applyVolumeClaimTemplates(workspaceBindings []v1.WorkspaceBinding, owner metav1.OwnerReference) []v1.WorkspaceBinding {
	translated := make([]v1.WorkspaceBinding, 0, len(workspaceBindings))
	for _, binding := range workspaceBindings {
		if binding.VolumeClaimTemplate == nil {
			translated = append(translated, binding)
			continue
		}
		// Replace the template with a concrete PVC reference.
		translated = append(translated, v1.WorkspaceBinding{
			Name:    binding.Name,
			SubPath: binding.SubPath,
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: volumeclaim.GeneratePVCNameFromWorkspaceBinding(binding.VolumeClaimTemplate.Name, binding, owner),
			},
		})
	}
	return translated
}
// storeTaskSpecAndMergeMeta stores the resolved TaskSpec in the TaskRun status
// (first reconcile only) and merges resolved Task metadata into the TaskRun.
// When provenance is enabled it also records feature flags and the remote
// resolution RefSource in the status.
func storeTaskSpecAndMergeMeta(ctx context.Context, tr *v1.TaskRun, ts *v1.TaskSpec, meta *resolutionutil.ResolvedObjectMeta) error {
	// Only store the TaskSpec once, if it has never been set before.
	if tr.Status.TaskSpec == nil {
		tr.Status.TaskSpec = ts
		if meta == nil {
			return nil
		}
		// Propagate annotations from Task to TaskRun; TaskRun values win.
		tr.ObjectMeta.Annotations = kmap.Union(kmap.ExcludeKeys(meta.Annotations, tknreconciler.KubectlLastAppliedAnnotationKey), tr.ObjectMeta.Annotations)
		// Propagate labels from Task to TaskRun; TaskRun values win.
		tr.ObjectMeta.Labels = kmap.Union(meta.Labels, tr.ObjectMeta.Labels)
		if tr.Spec.TaskRef != nil {
			tr.ObjectMeta.Labels[pipeline.TaskLabelKey] = meta.Name
		}
	}
	cfg := config.FromContextOrDefaults(ctx)
	if !cfg.FeatureFlags.EnableProvenanceInStatus {
		return nil
	}
	if tr.Status.Provenance == nil {
		tr.Status.Provenance = &v1.Provenance{}
	}
	// Store FeatureFlags in the Provenance.
	tr.Status.Provenance.FeatureFlags = cfg.FeatureFlags
	// Propagate RefSource from remote resolution to TaskRun Status.
	// This lives outside of the status.spec check to avoid the case where only
	// the spec is available in the first reconcile and refSource comes in the next.
	if meta != nil && meta.RefSource != nil && tr.Status.Provenance.RefSource == nil {
		tr.Status.Provenance.RefSource = meta.RefSource
	}
	return nil
}
// willOverwritePodSetAffinity returns a bool indicating whether the
// affinity for pods will be overwritten with affinity assistant: true when an
// affinity assistant is assigned (annotation set) and the pod template also
// specifies its own affinity.
func willOverwritePodSetAffinity(taskRun *v1.TaskRun) bool {
	if taskRun.Annotations[workspace.AnnotationAffinityAssistantName] == "" {
		return false
	}
	tpl := taskRun.Spec.PodTemplate
	return tpl != nil && tpl.Affinity != nil
}
// isResourceQuotaConflictError returns a bool indicating whether the
// k8s error is a Conflict whose details point at the resourcequotas kind.
func isResourceQuotaConflictError(err error) bool {
	var apiErr k8serrors.APIStatus
	if !errors.As(err, &apiErr) {
		return false
	}
	status := apiErr.Status()
	return status.Reason == metav1.StatusReasonConflict &&
		status.Details != nil &&
		status.Details.Kind == "resourcequotas"
}
const (
	// optimisticLockErrorMsg is an error message exported from k8s.io/apiserver/pkg/registry/generic/registry.OptimisticLockErrorMsg
	// We made a tradeoff here because importing the package would introduce approximately 94klines
	// of code as a new dependency, and it would only be used to export one constant in one place.
	// In future we might find a better way to maintain consistency for this upstream error message.
	// It is matched via strings.Contains in isConcurrentModificationError below.
	optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"
)
// isConcurrentModificationError determines whether it is a concurrent
// modification error depending on its error type and error message: the error
// must be a Conflict StatusError whose message contains the apiserver's
// optimistic-lock text.
func isConcurrentModificationError(err error) bool {
	if !k8serrors.IsConflict(err) {
		return false
	}
	var statusErr *k8serrors.StatusError
	return errors.As(err, &statusErr) && strings.Contains(err.Error(), optimisticLockErrorMsg)
}
// retryTaskRun archives taskRun.Status to taskRun.Status.RetriesStatus, and sets
// taskRun status to Unknown with Reason v1.TaskRunReasonToBeRetried so that the
// next reconcile starts the attempt from a clean slate.
func retryTaskRun(tr *v1.TaskRun, message string) {
	archived := tr.Status.DeepCopy()
	// The archived copy must not itself carry retry history.
	archived.RetriesStatus = nil
	tr.Status.RetriesStatus = append(tr.Status.RetriesStatus, *archived)
	// Reset all run-scoped fields for the new attempt.
	tr.Status.StartTime = nil
	tr.Status.CompletionTime = nil
	tr.Status.PodName = ""
	tr.Status.Results = nil
	apis.NewBatchConditionSet().Manage(&tr.Status).MarkUnknown(apis.ConditionSucceeded, v1.TaskRunReasonToBeRetried.String(), message)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskrun
import (
"context"
"encoding/json"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
"knative.dev/pkg/logging"
)
const (
	// TracerName is the name of the tracer
	TracerName = "TaskRunReconciler"
	// SpanContextAnnotation is the name of the Annotation used for propagating SpanContext
	SpanContextAnnotation = "tekton.dev/taskrunSpanContext"
)
// initTracing initializes tracing for a TaskRun reconcile by extracting an
// existing SpanContext or creating the root span. The SpanContext is
// propagated through the SpanContextAnnotation annotation and persisted in
// the TaskRun status. It returns a context carrying the span (or the original
// context when tracing is disabled).
func initTracing(ctx context.Context, tracerProvider trace.TracerProvider, tr *v1.TaskRun) context.Context {
	logger := logging.FromContext(ctx)
	pro := otel.GetTextMapPropagator()
	// SpanContext was created already
	if len(tr.Status.SpanContext) > 0 {
		return pro.Extract(ctx, propagation.MapCarrier(tr.Status.SpanContext))
	}
	spanContext := make(map[string]string)
	// SpanContext was propagated through annotations
	if tr.Annotations != nil && tr.Annotations[SpanContextAnnotation] != "" {
		err := json.Unmarshal([]byte(tr.Annotations[SpanContextAnnotation]), &spanContext)
		if err != nil {
			// Use Errorf (not Error) so the %s verb is actually formatted by
			// the sugared logger.
			logger.Errorf("unable to unmarshal spancontext, err: %s", err)
		}
		tr.Status.SpanContext = spanContext
		return pro.Extract(ctx, propagation.MapCarrier(tr.Status.SpanContext))
	}
	// Create a new root span since there was no parent spanContext provided through annotations
	ctxWithTrace, span := tracerProvider.Tracer(TracerName).Start(ctx, "TaskRun:Reconciler")
	defer span.End()
	span.SetAttributes(attribute.String("taskrun", tr.Name), attribute.String("namespace", tr.Namespace))
	pro.Inject(ctxWithTrace, propagation.MapCarrier(spanContext))
	logger.Debug("got tracing carrier", spanContext)
	if len(spanContext) == 0 {
		logger.Debug("tracerProvider doesn't provide a traceId, tracing is disabled")
		return ctx
	}
	span.AddEvent("updating TaskRun status with SpanContext")
	tr.Status.SpanContext = spanContext
	return ctxWithTrace
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskrun
import (
"context"
"fmt"
"sort"
"strings"
"errors"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/list"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
)
// validateParams validates that all Pipeline Task, Matrix.Params and Matrix.Include parameters all have values, match the specified
// type and object params have all the keys required.
func validateParams(ctx context.Context, paramSpecs []v1.ParamSpec, params v1.Params, matrixParams v1.Params) error {
	if paramSpecs == nil {
		return nil
	}
	neededParamsNames, neededParamsTypes := neededParamsNamesAndTypes(paramSpecs)
	// Build the combined list in a fresh slice. The original aliased `params`
	// (providedParams := params) and then appended, which could overwrite the
	// caller's backing array past len(params) when it had spare capacity.
	providedParams := make(v1.Params, 0, len(params)+len(matrixParams))
	providedParams = append(providedParams, params...)
	providedParams = append(providedParams, matrixParams...)
	providedParamsNames := providedParams.ExtractNames()
	if missingParamsNames := missingParamsNames(neededParamsNames, providedParamsNames, paramSpecs); len(missingParamsNames) != 0 {
		return fmt.Errorf("missing values for these params which have no default values: %s", missingParamsNames)
	}
	if wrongTypeParamNames := wrongTypeParamsNames(params, matrixParams, neededParamsTypes); len(wrongTypeParamNames) != 0 {
		return fmt.Errorf("param types don't match the user-specified type: %s", wrongTypeParamNames)
	}
	if missingKeysObjectParamNames := MissingKeysObjectParamNames(paramSpecs, params); len(missingKeysObjectParamNames) != 0 {
		return fmt.Errorf("missing keys for these params which are required in ParamSpec's properties %v", missingKeysObjectParamNames)
	}
	return nil
}
// neededParamsNamesAndTypes returns the set of declared parameter names and a
// name-to-type map, both derived from the given paramSpecs.
func neededParamsNamesAndTypes(paramSpecs []v1.ParamSpec) (sets.String, map[string]v1.ParamType) {
	names := sets.String{}
	types := make(map[string]v1.ParamType, len(paramSpecs))
	for _, spec := range paramSpecs {
		names.Insert(spec.Name)
		types[spec.Name] = spec.Type
	}
	return names, types
}
// missingParamsNames returns a slice of missing parameter names that have not been declared with a default value
// in the paramSpec.
func missingParamsNames(neededParams sets.String, providedParams sets.String, paramSpecs []v1.ParamSpec) []string {
	notProvided := neededParams.Difference(providedParams)
	var missingWithoutDefaults []string
	for _, spec := range paramSpecs {
		// Only params that are both unprovided and defaultless are errors.
		if notProvided.Has(spec.Name) && spec.Default == nil {
			missingWithoutDefaults = append(missingWithoutDefaults, spec.Name)
		}
	}
	return missingWithoutDefaults
}
// wrongTypeParamsNames returns the names of provided params (regular and
// matrix) whose value type does not match the type declared for them in
// neededParamsTypes. Params that the task does not declare are ignored.
func wrongTypeParamsNames(params []v1.Param, matrix v1.Params, neededParamsTypes map[string]v1.ParamType) []string {
	// TODO(#4723): validate that $(task.taskname.result.resultname) is invalid for array and object type.
	// It should be used to refer string and need to add [*] to refer to array or object.
	var mismatched []string
	for _, p := range params {
		wantType, declared := neededParamsTypes[p.Name]
		if !declared {
			// Ignore any missing params - this happens when extra params were
			// passed to the task that aren't being used.
			continue
		}
		// This is needed to support array replacements in params. Users want to use $(tasks.taskName.results.resultname[*])
		// to pass array result to array param, yet in yaml format this will be
		// unmarshalled to string for ParamValues. So we need to check and skip this validation.
		// Please refer issue #4879 for more details and examples.
		if p.Value.Type == v1.ParamTypeString &&
			(wantType == v1.ParamTypeArray || wantType == v1.ParamTypeObject) &&
			v1.VariableSubstitutionRegex.MatchString(p.Value.StringVal) {
			continue
		}
		if p.Value.Type != wantType {
			mismatched = append(mismatched, p.Name)
		}
	}
	for _, p := range matrix {
		wantType, declared := neededParamsTypes[p.Name]
		if !declared {
			// Extra matrix params the task does not declare are ignored too.
			continue
		}
		// Matrix param replacements must be of type String
		if wantType != v1.ParamTypeString {
			mismatched = append(mismatched, p.Name)
		}
	}
	return mismatched
}
// MissingKeysObjectParamNames checks if all required keys of object type param definitions are provided in params or param definitions' defaults.
// It returns a map from param name to the list of keys that are still missing.
func MissingKeysObjectParamNames(paramSpecs []v1.ParamSpec, params v1.Params) map[string][]string {
	neededKeys := map[string][]string{}
	providedKeys := map[string][]string{}
	for _, spec := range paramSpecs {
		if spec.Type != v1.ParamTypeObject {
			continue
		}
		// Required keys come from the spec's properties section.
		for key := range spec.Properties {
			neededKeys[spec.Name] = append(neededKeys[spec.Name], key)
		}
		// Keys supplied by the spec's default also count as provided.
		if spec.Default != nil && spec.Default.ObjectVal != nil {
			for key := range spec.Default.ObjectVal {
				providedKeys[spec.Name] = append(providedKeys[spec.Name], key)
			}
		}
	}
	// Keys supplied at run level count as provided too.
	for _, p := range params {
		if p.Value.Type != v1.ParamTypeObject {
			continue
		}
		for key := range p.Value.ObjectVal {
			providedKeys[p.Name] = append(providedKeys[p.Name], key)
		}
	}
	return findMissingKeys(neededKeys, providedKeys)
}
// findMissingKeys checks if objects have missing keys in its providers (taskrun value and default)
func findMissingKeys(neededKeys, providedKeys map[string][]string) map[string][]string {
	missings := map[string][]string{}
	for name, provided := range providedKeys {
		needed, declared := neededKeys[name]
		if !declared {
			// Ignore any missing objects - this happens when object param is provided with default
			continue
		}
		if diff := list.DiffLeft(needed, provided); len(diff) > 0 {
			missings[name] = diff
		}
	}
	return missings
}
// ValidateResolvedTask validates that all parameters declared in the TaskSpec are present in the taskrun
// It also validates that all parameters have values, parameter types match the specified type and
// object params have all the keys required
func ValidateResolvedTask(ctx context.Context, params []v1.Param, matrix *v1.Matrix, rtr *resources.ResolvedTask) error {
	var paramSpecs v1.ParamSpecs
	// Capture the task name while we know rtr is non-nil; the original code
	// dereferenced rtr.TaskName in the error path even when rtr was nil,
	// which would panic instead of returning the validation error.
	taskName := ""
	if rtr != nil {
		paramSpecs = rtr.TaskSpec.Params
		taskName = rtr.TaskName
	}
	if err := validateParams(ctx, paramSpecs, params, matrix.GetAllParams()); err != nil {
		return pipelineErrors.WrapUserError(fmt.Errorf("invalid input params for task %s: %w", taskName, err))
	}
	return nil
}
// ValidateEnumParam validates the param values are in the defined enum list in the corresponding paramSpecs if provided.
// A validation error is returned otherwise.
func ValidateEnumParam(ctx context.Context, params []v1.Param, paramSpecs v1.ParamSpecs) error {
	enumsByName := make(map[string][]string)
	for _, spec := range paramSpecs {
		if len(spec.Enum) > 0 {
			enumsByName[spec.Name] = spec.Enum
		}
	}
	for _, p := range params {
		// Non-string values and empty strings (optional params falling back to
		// their defaults) are skipped here; defaults are validated at the
		// validation webhook dry run.
		if p.Value.Type != v1.ParamTypeString || p.Value.StringVal == "" {
			continue
		}
		allowed, restricted := enumsByName[p.Name]
		if !restricted {
			// No enum declared for this param, nothing to check.
			continue
		}
		if !slices.Contains(allowed, p.Value.StringVal) {
			return pipelineErrors.WrapUserError(fmt.Errorf("param `%s` value: %s is not in the enum list", p.Name, p.Value.StringVal))
		}
	}
	return nil
}
// validateTaskSpecRequestResources checks that every step's compute resource
// requests do not exceed the corresponding limits, falling back to the
// stepTemplate limits when a step does not declare its own.
func validateTaskSpecRequestResources(taskSpec *v1.TaskSpec) error {
	if taskSpec == nil {
		return nil
	}
	for _, step := range taskSpec.Steps {
		for k, request := range step.ComputeResources.Requests {
			// Prefer the limit configured on the step itself.
			if limit, ok := step.ComputeResources.Limits[k]; ok {
				if (&limit).Cmp(request) == -1 {
					return pipelineErrors.WrapUserError(fmt.Errorf("invalid request resource value: %v must be less or equal to limit %v", request.String(), limit.String()))
				}
				continue
			}
			// The step has no limit for this resource; fall back to the
			// stepTemplate limit when one is configured.
			if taskSpec.StepTemplate == nil {
				continue
			}
			if limit, ok := taskSpec.StepTemplate.ComputeResources.Limits[k]; ok {
				if (&limit).Cmp(request) == -1 {
					return pipelineErrors.WrapUserError(fmt.Errorf("invalid request resource value: %v must be less or equal to limit %v", request.String(), limit.String()))
				}
			}
		}
	}
	return nil
}
// validateOverrides validates that all stepOverrides map to valid steps, and likewise for sidecarOverrides
func validateOverrides(ts *v1.TaskSpec, trs *v1.TaskRunSpec) error {
	return errors.Join(
		validateStepOverrides(ts, trs),
		validateSidecarOverrides(ts, trs),
	)
}
// validateStepOverrides checks that every StepSpec in the TaskRunSpec names a
// step that exists in the TaskSpec, joining one error per unknown name.
func validateStepOverrides(ts *v1.TaskSpec, trs *v1.TaskRunSpec) error {
	known := sets.NewString()
	for _, s := range ts.Steps {
		known.Insert(s.Name)
	}
	var errs []error
	for _, override := range trs.StepSpecs {
		if known.Has(override.Name) {
			continue
		}
		errs = append(errs, pipelineErrors.WrapUserError(fmt.Errorf("invalid StepOverride: No Step named %s", override.Name)))
	}
	return errors.Join(errs...)
}
// validateSidecarOverrides checks that every SidecarSpec in the TaskRunSpec
// names a sidecar that exists in the TaskSpec, joining one error per unknown name.
func validateSidecarOverrides(ts *v1.TaskSpec, trs *v1.TaskRunSpec) error {
	known := sets.NewString()
	for _, sc := range ts.Sidecars {
		known.Insert(sc.Name)
	}
	var errs []error
	for _, override := range trs.SidecarSpecs {
		if known.Has(override.Name) {
			continue
		}
		errs = append(errs, pipelineErrors.WrapUserError(fmt.Errorf("invalid SidecarOverride: No Sidecar named %s", override.Name)))
	}
	return errors.Join(errs...)
}
// validateTaskRunResults checks the emitted results' types and object properties
// against the declarations in the TaskRun's embedded and resolved TaskSpecs.
func validateTaskRunResults(tr *v1.TaskRun, resolvedTaskSpec *v1.TaskSpec) error {
	specResults := []v1.TaskResult{}
	if tr.Spec.TaskSpec != nil {
		specResults = append(specResults, tr.Spec.TaskSpec.Results...)
	}
	if resolvedTaskSpec != nil {
		specResults = append(specResults, resolvedTaskSpec.Results...)
	}
	// Reject results whose emitted type differs from the declared one.
	if mismatched := mismatchedTypesResults(tr, specResults); len(mismatched) != 0 {
		var msgs []string
		for name, msg := range mismatched {
			msgs = append(msgs, fmt.Sprintf(" \"%v\": %v", name, msg))
		}
		// Sort for a deterministic error message.
		sort.Strings(msgs)
		return pipelineErrors.WrapUserError(fmt.Errorf("Provided results don't match declared results; may be invalid JSON or missing result declaration: %v", strings.Join(msgs, ",")))
	}
	// Object results additionally need every declared key to be present.
	if missingKeys := missingKeysofObjectResults(tr, specResults); len(missingKeys) != 0 {
		return pipelineErrors.WrapUserError(fmt.Errorf("missing keys for these results which are required in TaskResult's properties %v", missingKeys))
	}
	return nil
}
// mismatchedTypesResults checks and returns all the mismatched types of emitted
// results against specified results. As a side effect, results with mismatched
// types are removed from tr.Status.Results.
func mismatchedTypesResults(tr *v1.TaskRun, specResults []v1.TaskResult) map[string]string {
	declaredTypes := make(map[string]string)
	for _, sr := range specResults {
		declaredTypes[sr.Name] = string(sr.Type)
	}
	mismatched := make(map[string]string)
	var kept []v1.TaskRunResult
	// TODO(#6097): Validate if the emitted results are defined in taskspec
	for _, emitted := range tr.Status.Results {
		want, declared := declaredTypes[emitted.Name]
		if declared && want != string(emitted.Type) {
			mismatched[emitted.Name] = fmt.Sprintf("task result is expected to be \"%v\" type but was initialized to a different type \"%v\"", want, emitted.Type)
			continue
		}
		kept = append(kept, emitted)
	}
	// Only well-typed (or undeclared) results survive on the status.
	tr.Status.Results = kept
	return mismatched
}
// missingKeysofObjectResults checks and returns the missing keys of object results.
func missingKeysofObjectResults(tr *v1.TaskRun, specResults []v1.TaskResult) map[string][]string {
	needed := map[string][]string{}
	provided := map[string][]string{}
	// Declared object results require every key listed under their properties.
	for _, sr := range specResults {
		if string(sr.Type) != string(v1.ParamTypeObject) {
			continue
		}
		for key := range sr.Properties {
			needed[sr.Name] = append(needed[sr.Name], key)
		}
	}
	// Emitted object results provide the keys of their object value.
	for _, emitted := range tr.Status.Results {
		if emitted.Value.Type != v1.ParamTypeObject {
			continue
		}
		for key := range emitted.Value.ObjectVal {
			provided[emitted.Name] = append(provided[emitted.Name], key)
		}
	}
	return findMissingKeys(needed, provided)
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"os"
"strconv"
"testing"
"github.com/tektoncd/pipeline/pkg/apis/config"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/system"
"sigs.k8s.io/yaml"
)
const (
	// apiFieldsFeatureFlag is the feature-flags ConfigMap key controlling which API fields are enabled.
	apiFieldsFeatureFlag = "enable-api-fields"
	// maxMatrixCombinationsCountFlag is the defaults ConfigMap key for the maximum matrix combinations count.
	maxMatrixCombinationsCountFlag = "default-max-matrix-combinations-count"
)
// ConfigMapFromTestFile creates a v1.ConfigMap from a YAML file
// It loads the YAML file from the testdata folder.
func ConfigMapFromTestFile(t *testing.T, name string) *corev1.ConfigMap {
	t.Helper()
	path := fmt.Sprintf("testdata/%s.yaml", name)
	contents, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("ReadFile() = %v", err)
	}
	// "sigs.k8s.io/yaml" reads json struct tags, so the ConfigMap
	// fields unmarshal properly.
	var cm corev1.ConfigMap
	if err := yaml.Unmarshal(contents, &cm); err != nil {
		t.Fatalf("yaml.Unmarshal() = %v", err)
	}
	return &cm
}
// NewFeatureFlagsConfigMapInSlice returns a one-element slice holding an empty feature-flags ConfigMap.
func NewFeatureFlagsConfigMapInSlice() []*corev1.ConfigMap {
	cm := newFeatureFlagsConfigMap()
	return []*corev1.ConfigMap{cm}
}
// newFeatureFlagsConfigMap builds an empty feature-flags ConfigMap in the system namespace.
func newFeatureFlagsConfigMap() *corev1.ConfigMap {
	cm := corev1.ConfigMap{Data: map[string]string{}}
	cm.ObjectMeta = metav1.ObjectMeta{
		Name:      config.GetFeatureFlagsConfigName(),
		Namespace: system.Namespace(),
	}
	return &cm
}
// NewAlphaFeatureFlagsConfigMapInSlice returns a one-element slice holding a
// feature-flags ConfigMap with alpha API fields enabled.
func NewAlphaFeatureFlagsConfigMapInSlice() []*corev1.ConfigMap {
	alpha := withEnabledAlphaAPIFields(newFeatureFlagsConfigMap())
	return []*corev1.ConfigMap{alpha}
}
// withEnabledAlphaAPIFields returns a deep copy of cm with the API-fields flag set to alpha.
func withEnabledAlphaAPIFields(cm *corev1.ConfigMap) *corev1.ConfigMap {
	out := cm.DeepCopy()
	out.Data[apiFieldsFeatureFlag] = config.AlphaAPIFields
	return out
}
// NewFeatureFlagsConfigMapWithMatrixInSlice returns the feature-flags ConfigMap
// plus a defaults ConfigMap carrying the given max matrix combinations count.
func NewFeatureFlagsConfigMapWithMatrixInSlice(count int) []*corev1.ConfigMap {
	defaults := withMaxMatrixCombinationsCount(newDefaultsConfigMap(), count)
	return append(NewFeatureFlagsConfigMapInSlice(), defaults)
}
// withMaxMatrixCombinationsCount returns a deep copy of cm with the
// max matrix combinations count flag set to count.
func withMaxMatrixCombinationsCount(cm *corev1.ConfigMap, count int) *corev1.ConfigMap {
	out := cm.DeepCopy()
	out.Data[maxMatrixCombinationsCountFlag] = strconv.Itoa(count)
	return out
}
// newDefaultsConfigMap builds an empty defaults ConfigMap in the system namespace.
func newDefaultsConfigMap() *corev1.ConfigMap {
	cm := corev1.ConfigMap{Data: map[string]string{}}
	cm.ObjectMeta = metav1.ObjectMeta{
		Name:      config.GetDefaultsConfigName(),
		Namespace: system.Namespace(),
	}
	return &cm
}
// NewAlphaFeatureFlagsConfigMapWithMatrixInSlice returns the alpha feature-flags
// ConfigMap plus a defaults ConfigMap carrying the given max matrix combinations count.
func NewAlphaFeatureFlagsConfigMapWithMatrixInSlice(count int) []*corev1.ConfigMap {
	defaults := withMaxMatrixCombinationsCount(newDefaultsConfigMap(), count)
	return append(NewAlphaFeatureFlagsConfigMapInSlice(), defaults)
}
// NewDefaultsCofigMapInSlice returns a one-element slice holding an empty defaults ConfigMap.
// NOTE(review): the exported name contains a typo ("Cofig"); it is kept as-is so
// existing callers keep compiling — renaming would be a breaking API change.
func NewDefaultsCofigMapInSlice() []*corev1.ConfigMap {
	return []*corev1.ConfigMap{newDefaultsConfigMap()}
}
package testing
import (
"fmt"
"testing"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/test/parse"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
var (
	// trueb provides an addressable bool for the *bool OwnerReference fields
	// (Controller, BlockOwnerDeletion) built in childPipelineRunWithObjectMeta.
	trueb = true
)
// TwoPipelinesInPipelineMixedTasks creates a parent Pipeline with two embedded child Pipelines:
// one using an embedded taskSpec and the other using a taskRef. It also creates a PipelineRun
// for the parent Pipeline, the expected child PipelineRuns for each child Pipeline and the
// referenced task.
func TwoPipelinesInPipelineMixedTasks(t *testing.T, namespace, parentPipelineRunName string) (*v1.Task, *v1.Pipeline, *v1.PipelineRun, []*v1.PipelineRun) {
	t.Helper()
	uid := "bar"
	taskName := "ref-task"
	parentPipelineName := "parent-pipeline-mixed"
	childPipelineName1 := "child-pipeline-taskspec"
	childPipelineName2 := "child-pipeline-taskref"
	childPipelineTaskName1 := "child-taskspec"
	childPipelineTaskName2 := "child-taskref"
	// The standalone Task referenced by the second child pipeline's taskRef.
	task := parse.MustParseV1Task(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
spec:
  steps:
  - name: mystep
    image: mirror.gcr.io/busybox
    script: 'echo "Hello from referenced task in child PipelineRun 2!"'
`, taskName, namespace))
	// Parent pipeline: first child uses an inline taskSpec, second uses a taskRef.
	parentPipeline := parse.MustParseV1Pipeline(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
spec:
  tasks:
  - name: %s
    pipelineSpec:
      tasks:
      - name: %s
        taskSpec:
          steps:
          - name: mystep
            image: mirror.gcr.io/busybox
            script: 'echo "Hello from child PipelineRun 1!"'
  - name: %s
    pipelineSpec:
      tasks:
      - name: %s
        taskRef:
          name: %s
`, parentPipelineName, namespace, childPipelineName1, childPipelineTaskName1, childPipelineName2, childPipelineTaskName2, taskName))
	parentPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
  uid: %s
spec:
  pipelineRef:
    name: %s
`, parentPipelineRunName, namespace, uid, parentPipelineName))
	// Expected child PipelineRun names follow the <parent-run>-<child-pipeline> convention.
	expectedName1 := parentPipelineRunName + "-" + childPipelineName1
	expectedChildPipelineRun1 := parse.MustParseChildPipelineRunWithObjectMeta(
		t,
		childPipelineRunWithObjectMeta(
			expectedName1,
			namespace,
			parentPipelineRunName,
			parentPipelineName,
			childPipelineName1,
			uid,
		),
		fmt.Sprintf(`
spec:
  pipelineSpec:
    tasks:
    - name: %s
      taskSpec:
        steps:
        - name: mystep
          image: mirror.gcr.io/busybox
          script: 'echo "Hello from child PipelineRun 1!"'
`, childPipelineTaskName1),
	)
	expectedName2 := parentPipelineRunName + "-" + childPipelineName2
	expectedChildPipelineRun2 := parse.MustParseChildPipelineRunWithObjectMeta(
		t,
		childPipelineRunWithObjectMeta(
			expectedName2,
			namespace,
			parentPipelineRunName,
			parentPipelineName,
			childPipelineName2,
			uid,
		),
		fmt.Sprintf(`
spec:
  pipelineSpec:
    tasks:
    - name: %s
      taskRef:
        name: %s
`, childPipelineTaskName2, taskName),
	)
	return task, parentPipeline, parentPipelineRun, []*v1.PipelineRun{expectedChildPipelineRun1, expectedChildPipelineRun2}
}
// OnePipelineInPipeline creates a single Pipeline with one child pipeline using
// PipelineSpec with TaskSpec. It also creates the according PipelineRun for it
// and the expected child PipelineRun against which the test will validate.
func OnePipelineInPipeline(t *testing.T, namespace, parentPipelineRunName string) (*v1.Pipeline, *v1.PipelineRun, *v1.PipelineRun) {
	t.Helper()
	uid := "bar"
	parentPipelineName := "parent-pipeline"
	childPipelineName := "child-pipeline"
	childPipelineTaskName := "child-pipeline-task"
	// Parent pipeline embedding a single child pipeline with an inline taskSpec.
	parentPipeline := parse.MustParseV1Pipeline(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
spec:
  tasks:
  - name: %s
    pipelineSpec:
      tasks:
      - name: %s
        taskSpec:
          steps:
          - name: mystep
            image: mirror.gcr.io/busybox
            script: 'echo "Hello from child PipelineRun!"'
`, parentPipelineName, namespace, childPipelineName, childPipelineTaskName))
	parentPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
  uid: %s
spec:
  pipelineRef:
    name: %s
`, parentPipelineRunName, namespace, uid, parentPipelineName))
	// The expected child PipelineRun name follows the <parent-run>-<child-pipeline> convention.
	expectedName := parentPipelineRunName + "-" + childPipelineName
	expectedChildPipelineRun := parse.MustParseChildPipelineRunWithObjectMeta(
		t,
		childPipelineRunWithObjectMeta(
			expectedName,
			namespace,
			parentPipelineRunName,
			parentPipelineName,
			childPipelineName,
			uid,
		),
		fmt.Sprintf(`
spec:
  pipelineSpec:
    tasks:
    - name: %s
      taskSpec:
        steps:
        - name: mystep
          image: mirror.gcr.io/busybox
          script: 'echo "Hello from child PipelineRun!"'
`, childPipelineTaskName),
	)
	return parentPipeline, parentPipelineRun, expectedChildPipelineRun
}
// WithAnnotationAndLabel decorates pr with a test annotation and a test label.
// When withUnused is true it also sets a tekton.dev/pipeline label whose value
// ("will-not-be-used") the tests apparently expect to be ignored/overwritten.
func WithAnnotationAndLabel(pr *v1.PipelineRun, withUnused bool) *v1.PipelineRun {
	if pr.Annotations == nil {
		pr.Annotations = make(map[string]string)
	}
	if pr.Labels == nil {
		pr.Labels = make(map[string]string)
	}
	pr.Annotations["tekton.test/annotation"] = "test-annotation-value"
	pr.Labels["tekton.test/label"] = "test-label-value"
	if withUnused {
		pr.Labels["tekton.dev/pipeline"] = "will-not-be-used"
	}
	return pr
}
// childPipelineRunWithObjectMeta builds the ObjectMeta expected on a child
// PipelineRun: an owner reference pointing at the parent PipelineRun plus the
// standard pipeline labels.
func childPipelineRunWithObjectMeta(
	childPipelineRunName,
	ns,
	parentPipelineRunName,
	parentPipelineName,
	pipelineTaskName,
	uid string,
) metav1.ObjectMeta {
	owner := metav1.OwnerReference{
		Kind:               pipeline.PipelineRunControllerName,
		Name:               parentPipelineRunName,
		APIVersion:         "tekton.dev/v1",
		Controller:         &trueb,
		BlockOwnerDeletion: &trueb,
		UID:                types.UID(uid),
	}
	labels := map[string]string{
		pipeline.PipelineLabelKey:       parentPipelineName,
		pipeline.PipelineRunLabelKey:    parentPipelineRunName,
		pipeline.PipelineTaskLabelKey:   pipelineTaskName,
		pipeline.PipelineRunUIDLabelKey: uid,
		pipeline.MemberOfLabelKey:       v1.PipelineTasks,
	}
	return metav1.ObjectMeta{
		Name:            childPipelineRunName,
		Namespace:       ns,
		OwnerReferences: []metav1.OwnerReference{owner},
		Labels:          labels,
		Annotations:     map[string]string{},
	}
}
// NestedPipelinesInPipeline creates a three-level nested pipeline structure:
// Parent Pipeline -> Child Pipeline -> Grandchild Pipeline
// Returns the parent pipeline, parent pipelinerun, expected child pipelinerun, and expected grandchild pipelinerun
func NestedPipelinesInPipeline(t *testing.T, namespace, parentPipelineRunName string) (*v1.Pipeline, *v1.PipelineRun, *v1.PipelineRun, *v1.PipelineRun) {
	t.Helper()
	uid := "nested"
	parentPipelineName := "parent-pipeline"
	childPipelineName := "child-ppl"
	grandchildPipelineName := "grandchild-ppl"
	grandchildPipelineTaskName := "grandchild-task"
	// Parent pipeline embedding a child pipeline, which in turn embeds the grandchild.
	parentPipeline := parse.MustParseV1Pipeline(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
spec:
  tasks:
  - name: %s
    pipelineSpec:
      tasks:
      - name: %s
        pipelineSpec:
          tasks:
          - name: %s
            taskSpec:
              steps:
              - name: mystep
                image: mirror.gcr.io/busybox
                script: 'echo "Hello from grandchild Pipeline!"'
`, parentPipelineName, namespace, childPipelineName, grandchildPipelineName, grandchildPipelineTaskName))
	parentPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
  uid: %s
spec:
  pipelineRef:
    name: %s
`, parentPipelineRunName, namespace, uid, parentPipelineName))
	// expected child pipeline run created by parent
	expectedChildName := parentPipelineRunName + "-" + childPipelineName
	expectedChildPipelineRun := parse.MustParseChildPipelineRunWithObjectMeta(
		t,
		childPipelineRunWithObjectMeta(
			expectedChildName,
			namespace,
			parentPipelineRunName,
			parentPipelineName,
			childPipelineName,
			uid,
		),
		fmt.Sprintf(`
spec:
  pipelineSpec:
    tasks:
    - name: %s
      pipelineSpec:
        tasks:
        - name: %s
          taskSpec:
            steps:
            - name: mystep
              image: mirror.gcr.io/busybox
              script: 'echo "Hello from grandchild Pipeline!"'
`, grandchildPipelineName, grandchildPipelineTaskName),
	)
	// expected grandchild pipeline run created by child; note the child run acts
	// as both the owning run and the "pipeline" for labeling purposes here
	expectedGrandchildName := expectedChildName + "-" + grandchildPipelineName
	expectedGrandchildPipelineRun := parse.MustParseChildPipelineRunWithObjectMeta(
		t,
		childPipelineRunWithObjectMeta(
			expectedGrandchildName,
			namespace,
			expectedChildName,
			expectedChildName,
			grandchildPipelineName,
			"", // keep empty, UID is not set on actual child PipelineRun by fake client
		),
		fmt.Sprintf(`
spec:
  pipelineSpec:
    tasks:
    - name: %s
      taskSpec:
        steps:
        - name: mystep
          image: mirror.gcr.io/busybox
          script: 'echo "Hello from grandchild Pipeline!"'
`, grandchildPipelineTaskName),
	)
	return parentPipeline, parentPipelineRun, expectedChildPipelineRun, expectedGrandchildPipelineRun
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"testing"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
// Import for creating fake filtered factory in the test
_ "knative.dev/pkg/client/injection/kube/informers/factory/filtered/fake"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
logtesting "knative.dev/pkg/logging/testing"
)
// SetupFakeContext sets up the Context and the fake filtered informers for the tests.
func SetupFakeContext(t *testing.T) (context.Context, []controller.Informer) {
	t.Helper()
	ctx, _, informers := setupFakeContextWithLabelKey(t)
	ctx = WithLogger(ctx, t)
	return ctx, informers
}
// SetupFakeCloudClientContext attaches a fake cloud-event client (configured to
// send successfully) to the context, expecting expectedEventCount events.
func SetupFakeCloudClientContext(ctx context.Context, expectedEventCount int) context.Context {
	behaviour := &cloudevent.FakeClientBehaviour{SendSuccessfully: true}
	return cloudevent.WithFakeClient(ctx, behaviour, expectedEventCount)
}
// SetupDefaultContext sets up the Context and the default filtered informers for the tests.
func SetupDefaultContext(t *testing.T) (context.Context, []controller.Informer) {
	t.Helper()
	ctx, _, informers := setupDefaultContextWithLabelKey(t)
	ctx = WithLogger(ctx, t)
	return ctx, informers
}
// WithLogger returns the Logger
func WithLogger(ctx context.Context, t *testing.T) context.Context {
t.Helper()
return logging.WithLogger(ctx, TestLogger(t))
}
// TestLogger returns a sugared zap logger named after the running test,
// with caller annotation enabled.
func TestLogger(t *testing.T) *zap.SugaredLogger {
	base := zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller()))
	return base.Sugar().Named(t.Name())
}
// setupFakeContextWithLabelKey sets up the Context and the fake informers for the tests.
// The provided context includes the FilteredInformerFactory LabelKey.
func setupFakeContextWithLabelKey(t zaptest.TestingT) (context.Context, context.CancelFunc, []controller.Informer) {
	ctx, cancel := context.WithCancel(logtesting.TestContextWithLogger(t))
	ctx = controller.WithEventRecorder(ctx, record.NewFakeRecorder(1000))
	ctx = filteredinformerfactory.WithSelectors(ctx, v1.ManagedByLabelKey)
	ctx, informers := injection.Fake.SetupInformers(ctx, &rest.Config{})
	return ctx, cancel, informers
}
// setupDefaultContextWithLabelKey sets up the Context and the default informers for the tests.
// The provided context includes the FilteredInformerFactory LabelKey.
func setupDefaultContextWithLabelKey(t zaptest.TestingT) (context.Context, context.CancelFunc, []controller.Informer) {
	ctx, cancel := context.WithCancel(logtesting.TestContextWithLogger(t))
	ctx = filteredinformerfactory.WithSelectors(ctx, v1.ManagedByLabelKey)
	ctx, informers := injection.Default.SetupInformers(ctx, &rest.Config{})
	return ctx, cancel, informers
}
package testing
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
"knative.dev/pkg/apis"
)
// Kind strings used to filter ChildStatusReferences, taken from the controller names.
const (
	taskRun = pipeline.TaskRunControllerName
	customRun = pipeline.CustomRunControllerName
	pipelineRun = pipeline.PipelineRunControllerName
)
// CheckPipelineRunConditionStatusAndReason asserts that the PipelineRun status
// has a Succeeded condition with the expected status and reason.
func CheckPipelineRunConditionStatusAndReason(
	t *testing.T,
	prStatus v1.PipelineRunStatus,
	expectedStatus corev1.ConditionStatus,
	expectedReason string,
) {
	t.Helper()
	cond := prStatus.GetCondition(apis.ConditionSucceeded)
	if cond == nil {
		t.Fatalf("want condition, got nil")
	}
	if cond.Status != expectedStatus {
		t.Errorf("want status %v, got %v", expectedStatus, cond.Status)
	}
	if cond.Reason != expectedReason {
		t.Errorf("want reason %s, got %s", expectedReason, cond.Reason)
	}
}
// VerifyTaskRunStatusesCount checks the number of TaskRun child references on the status.
func VerifyTaskRunStatusesCount(t *testing.T, prStatus v1.PipelineRunStatus, expectedCount int) {
	t.Helper()
	verifyCount(t, prStatus, expectedCount, taskRun)
}
// verifyCount asserts that prStatus has exactly expectedCount child references of the given kind.
func verifyCount(t *testing.T, prStatus v1.PipelineRunStatus, expectedCount int, kind string) {
	t.Helper()
	matching := filterChildRefsForKind(prStatus.ChildReferences, kind)
	if len(matching) == expectedCount {
		return
	}
	// Pluralize the kind in the failure message when more than one was expected.
	label := kind
	if expectedCount > 1 {
		label += "s"
	}
	t.Errorf("Expected PipelineRun status ChildReferences to have %d %s, but was %d", expectedCount, label, len(matching))
}
// filterChildRefsForKind returns only the child references whose Kind matches kind.
func filterChildRefsForKind(childRefs []v1.ChildStatusReference, kind string) []v1.ChildStatusReference {
	var matching []v1.ChildStatusReference
	for _, ref := range childRefs {
		if ref.Kind != kind {
			continue
		}
		matching = append(matching, ref)
	}
	return matching
}
// VerifyTaskRunStatusesNames checks that each expected name has a TaskRun child reference.
func VerifyTaskRunStatusesNames(t *testing.T, prStatus v1.PipelineRunStatus, expectedNames ...string) {
	t.Helper()
	verifyNames(t, prStatus, expectedNames, taskRun)
}
// verifyNames asserts that every expected name appears among the child
// references of the given kind.
func verifyNames(t *testing.T, prStatus v1.PipelineRunStatus, expectedNames []string, kind string) {
	t.Helper()
	seen := map[string]bool{}
	for _, ref := range filterChildRefsForKind(prStatus.ChildReferences, kind) {
		seen[ref.Name] = true
	}
	for _, want := range expectedNames {
		if !seen[want] {
			t.Errorf("Expected PipelineRun status to include %s status for %s but was %v", kind, want, prStatus.ChildReferences)
		}
	}
}
// VerifyTaskRunStatusesWhenExpressions compares the WhenExpressions recorded on
// the named child reference against the expected ones.
func VerifyTaskRunStatusesWhenExpressions(t *testing.T, prStatus v1.PipelineRunStatus, trName string, expectedWhen []v1.WhenExpression) {
	t.Helper()
	var got []v1.WhenExpression
	for _, ref := range prStatus.ChildReferences {
		if ref.Name != trName {
			continue
		}
		got = append(got, ref.WhenExpressions...)
	}
	if d := cmp.Diff(expectedWhen, got); d != "" {
		t.Errorf("Expected to see When Expressions %v created. Diff %s", trName, diff.PrintWantGot(d))
	}
}
// VerifyCustomRunOrRunStatusesCount checks the number of CustomRun child references on the status.
func VerifyCustomRunOrRunStatusesCount(t *testing.T, prStatus v1.PipelineRunStatus, expectedCount int) {
	t.Helper()
	verifyCount(t, prStatus, expectedCount, customRun)
}
// VerifyCustomRunOrRunStatusesNames checks that each expected name has a CustomRun child reference.
func VerifyCustomRunOrRunStatusesNames(t *testing.T, prStatus v1.PipelineRunStatus, expectedNames ...string) {
	t.Helper()
	verifyNames(t, prStatus, expectedNames, customRun)
}
// VerifyChildPipelineRunStatusesCount checks the number of PipelineRun child references on the status.
func VerifyChildPipelineRunStatusesCount(t *testing.T, prStatus v1.PipelineRunStatus, expectedCount int) {
	t.Helper()
	verifyCount(t, prStatus, expectedCount, pipelineRun)
}
// VerifyChildPipelineRunStatusesNames checks that each expected name has a PipelineRun child reference.
func VerifyChildPipelineRunStatusesNames(t *testing.T, prStatus v1.PipelineRunStatus, expectedNames ...string) {
	t.Helper()
	verifyNames(t, prStatus, expectedNames, pipelineRun)
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumeclaim
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"strings"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"go.uber.org/zap"
"gomodules.xyz/jsonpatch/v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
)
// Condition reasons reported by this package.
const (
	// ReasonCouldntCreateWorkspacePVC indicates that a Pipeline expects a workspace from a
	// volumeClaimTemplate but couldn't create a claim.
	ReasonCouldntCreateWorkspacePVC = "CouldntCreateWorkspacePVC"
)
var (
	// ErrPvcCreationFailed is the sentinel for terminal PVC-creation failures.
	ErrPvcCreationFailed = errors.New("PVC creation error")
	// ErrPvcCreationFailedRetryable marks PVC-creation failures that callers may retry.
	ErrPvcCreationFailedRetryable = errors.New("PVC creation error, retryable")
)
// PvcHandler is used to create PVCs for workspaces
type PvcHandler interface {
	// CreatePVCFromVolumeClaimTemplate creates (if it does not already exist) the PVC
	// backing the given workspace binding's volumeClaimTemplate.
	CreatePVCFromVolumeClaimTemplate(ctx context.Context, wb v1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error
	// PurgeFinalizerAndDeletePVCForWorkspace deletes the named PVC and strips its
	// `kubernetes.io/pvc-protection` finalizer so deletion can complete.
	PurgeFinalizerAndDeletePVCForWorkspace(ctx context.Context, pvcName, namespace string) error
}
// defaultPVCHandler is the standard PvcHandler implementation backed by a Kubernetes clientset.
type defaultPVCHandler struct {
	clientset clientset.Interface // used for all PVC API calls
	logger *zap.SugaredLogger
}
// NewPVCHandler returns a new defaultPVCHandler
func NewPVCHandler(clientset clientset.Interface, logger *zap.SugaredLogger) PvcHandler {
	return &defaultPVCHandler{
		clientset: clientset,
		logger:    logger,
	}
}
// CreatePVCFromVolumeClaimTemplate checks if a PVC named <claim-name>-<workspace-name>-<owner-name> exists;
// where claim-name is provided by the user in the volumeClaimTemplate, and owner-name is the name of the
// resource with the volumeClaimTemplate declared, a PipelineRun or TaskRun. If the PVC did not exist, a new PVC
// with that name is created with the provided OwnerReference.
//
// Returns nil when the binding has no volumeClaimTemplate, when the PVC already
// exists, or when creation succeeds; wraps ErrPvcCreationFailed /
// ErrPvcCreationFailedRetryable on creation failure so callers can distinguish
// retryable failures with errors.Is.
func (c *defaultPVCHandler) CreatePVCFromVolumeClaimTemplate(ctx context.Context, wb v1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error {
	claim := c.getPVCFromVolumeClaimTemplate(wb, ownerReference, namespace)
	if claim == nil {
		// No volumeClaimTemplate on this binding; nothing to create.
		return nil
	}
	// Existence check first: a nil Get error means the PVC already exists and
	// the switch falls through without doing anything.
	_, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		_, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
		if err != nil {
			if apierrors.IsAlreadyExists(err) {
				// Lost a creation race to a concurrent reconcile; treat as success.
				c.logger.Infof("Tried to create PersistentVolumeClaim %s in namespace %s, but it already exists",
					claim.Name, claim.Namespace)
			} else if isRetryableError(err) {
				// This is a retry-able error
				return fmt.Errorf("%w for %s: %v", ErrPvcCreationFailedRetryable, claim.Name, err.Error())
			} else {
				return fmt.Errorf("%w for %s: %v", ErrPvcCreationFailed, claim.Name, err.Error())
			}
		} else {
			c.logger.Infof("Created PersistentVolumeClaim %s in namespace %s", claim.Name, claim.Namespace)
		}
	case err != nil:
		// Any Get failure other than NotFound is surfaced to the caller.
		return fmt.Errorf("failed to retrieve PVC %s: %w", claim.Name, err)
	}
	return nil
}
// PurgeFinalizerAndDeletePVCForWorkspace deletes pvcs and then purges the `kubernetes.io/pvc-protection` finalizer protection.
// Purging the `kubernetes.io/pvc-protection` finalizer allows the pvc to be deleted even when it is referenced by a taskrun pod.
// See more details in https://kubernetes.io/docs/concepts/storage/persistent-volumes/#storage-object-in-use-protection.
//
// The operation is idempotent: a PVC that has already disappeared (before the
// Get, between Get and Delete, or between Delete and Patch) is treated as a
// successful deletion rather than an error.
func (c *defaultPVCHandler) PurgeFinalizerAndDeletePVCForWorkspace(ctx context.Context, pvcName, namespace string) error {
	p, err := c.clientset.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
	if err != nil {
		// check if the PVC exists, otherwise skip the deletion
		if apierrors.IsNotFound(err) {
			c.logger.Debugf("PVC %s no longer exists, skipping deletion as it has already been removed", pvcName)
			return nil
		}
		return fmt.Errorf("failed to get the PVC %s: %w", pvcName, err)
	}
	// get the list of existing finalizers and drop `pvc-protection` if exists
	var finalizers []string
	for _, f := range p.ObjectMeta.Finalizers {
		if f == "kubernetes.io/pvc-protection" {
			continue
		}
		finalizers = append(finalizers, f)
	}
	// prepare data to remove pvc-protection from the list of finalizers
	removeFinalizerBytes, err := json.Marshal([]jsonpatch.JsonPatchOperation{{
		Path:      "/metadata/finalizers",
		Operation: "replace",
		Value:     finalizers,
	}})
	if err != nil {
		return fmt.Errorf("failed to marshal jsonpatch: %w", err)
	}
	// delete the PVC; tolerate it having vanished since the Get above
	err = c.clientset.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return fmt.Errorf("failed to delete the PVC %s: %w", pvcName, err)
	}
	// remove the finalizer so the pending deletion can complete. When the PVC is
	// not mounted by any pod, the pvc-protection controller may finalize it right
	// after the Delete, in which case the Patch races with removal and returns
	// NotFound — that still means the PVC is gone, so it is not an error.
	_, err = c.clientset.CoreV1().PersistentVolumeClaims(namespace).Patch(ctx, pvcName, types.JSONPatchType, removeFinalizerBytes, metav1.PatchOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return fmt.Errorf("failed to patch the PVC %s: %w", pvcName, err)
	}
	return nil
}
// getPVCFromVolumeClaimTemplate returns a PersistentVolumeClaim based on given workspaceBinding (using VolumeClaimTemplate), ownerReference and namespace
func (c *defaultPVCHandler) getPVCFromVolumeClaimTemplate(workspaceBinding v1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) *corev1.PersistentVolumeClaim {
	template := workspaceBinding.VolumeClaimTemplate
	if template == nil {
		c.logger.Infof("workspace binding %v does not contain VolumeClaimTemplate, skipping creating PVC", workspaceBinding.Name)
		return nil
	}
	// Copy the template so the binding itself is never mutated.
	pvc := template.DeepCopy()
	pvc.Name = GeneratePVCNameFromWorkspaceBinding(template.Name, workspaceBinding, ownerReference)
	pvc.Namespace = namespace
	pvc.OwnerReferences = []metav1.OwnerReference{ownerReference}
	return pvc
}
// GeneratePVCNameFromWorkspaceBinding gets the name of PersistentVolumeClaim for a Workspace and PipelineRun or TaskRun. claimName
// must come from a PersistentVolumeClaim of a volumeClaimTemplate. The returned name must be consistent given the same
// workspaceBinding name and ownerReference UID - because it is first used for creating a PVC and later,
// possibly several TaskRuns to lookup the PVC to mount.
// We use ownerReference UID over ownerReference name to distinguish runs with the same name.
// If the given volumeClaimTemplate name is empty, the prefix "pvc" will be applied to the PersistentVolumeClaim name.
// See function `getPersistentVolumeClaimNameWithAffinityAssistant` when the PersistentVolumeClaim is created by Affinity Assistant StatefulSet.
func GeneratePVCNameFromWorkspaceBinding(claimName string, wb v1.WorkspaceBinding, owner metav1.OwnerReference) string {
	prefix := claimName
	if prefix == "" {
		// No template name given: fall back to the generic "pvc" prefix.
		prefix = "pvc"
	}
	return fmt.Sprintf("%s-%s", prefix, getPersistentVolumeClaimIdentity(wb.Name, string(owner.UID)))
}
// getPersistentVolumeClaimIdentity derives a short deterministic identity for
// a workspace/owner pair: the first 10 hex characters of the SHA-256 digest
// of the two names concatenated.
func getPersistentVolumeClaimIdentity(workspaceName, ownerName string) string {
	sum := sha256.Sum256([]byte(workspaceName + ownerName))
	return hex.EncodeToString(sum[:])[:10]
}
// isRetryableError reports whether a PVC API error is transient and worth
// retrying: an optimistic-concurrency Conflict, or a Forbidden error caused
// by quota exhaustion (identified by the "exceeded quota" message fragment).
func isRetryableError(err error) bool {
	// Return the boolean expression directly instead of the
	// `if cond { return true }; return false` anti-idiom.
	return apierrors.IsConflict(err) ||
		(apierrors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota"))
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"archive/tar"
"context"
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/google/go-containerregistry/pkg/authn"
imgname "github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
ociremote "github.com/google/go-containerregistry/pkg/v1/remote"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
"github.com/tektoncd/pipeline/pkg/remote"
"k8s.io/apimachinery/pkg/runtime"
)
const (
	// KindAnnotation is an OCI annotation for the bundle kind (e.g. "task")
	KindAnnotation = "dev.tekton.image.kind"
	// APIVersionAnnotation is an OCI annotation for the bundle version
	APIVersionAnnotation = "dev.tekton.image.apiVersion"
	// TitleAnnotation is an OCI annotation for the bundle title
	TitleAnnotation = "dev.tekton.image.name"
	// MaximumBundleObjects defines the maximum number of objects (one per
	// image layer) permitted in a single bundle
	MaximumBundleObjects = 20
)
// Resolver implements the Resolver interface using OCI images.
type Resolver struct {
	imageReference string // fully qualified reference of the bundle image
	keychain authn.Keychain // credentials used to authenticate with the registry
	timeout time.Duration // per-operation timeout applied to registry calls
}

// NewResolver is a convenience function to return a new OCI resolver instance as a remote.Resolver with a short, 1m
// timeout for resolving an individual image.
func NewResolver(ref string, keychain authn.Keychain) remote.Resolver {
	return &Resolver{imageReference: ref, keychain: keychain, timeout: time.Second * 60}
}
// List retrieves a flat set of Tekton objects contained in the bundle image.
// Each manifest layer is surfaced as one ResolvedObject described by its
// kind/apiVersion/title annotations. The whole operation is bounded by the
// resolver's timeout.
func (o *Resolver) List(ctx context.Context) ([]remote.ResolvedObject, error) {
	timeoutCtx, cancel := context.WithTimeout(ctx, o.timeout)
	defer cancel()
	img, err := o.retrieveImage(timeoutCtx)
	if err != nil {
		return nil, err
	}
	manifest, err := img.Manifest()
	if err != nil {
		// Lowercased to follow Go error-string convention and to match the
		// identical message in Get().
		return nil, fmt.Errorf("could not parse image manifest: %w", err)
	}
	if err := o.checkImageCompliance(manifest); err != nil {
		return nil, err
	}
	contents := make([]remote.ResolvedObject, 0, len(manifest.Layers))
	for _, l := range manifest.Layers {
		contents = append(contents, remote.ResolvedObject{
			Kind:       l.Annotations[KindAnnotation],
			APIVersion: l.Annotations[APIVersionAnnotation],
			Name:       l.Annotations[TitleAnnotation],
		})
	}
	return contents, nil
}
// Get retrieves a specific object with the given Kind and name from the
// bundle image. The manifest layer whose annotations match kind/name is
// decoded, first as a tarball and, failing that, as raw bytes.
func (o *Resolver) Get(ctx context.Context, kind, name string) (runtime.Object, *pipelinev1.RefSource, error) {
	timeoutCtx, cancel := context.WithTimeout(ctx, o.timeout)
	defer cancel()
	img, err := o.retrieveImage(timeoutCtx)
	if err != nil {
		return nil, nil, err
	}
	manifest, err := img.Manifest()
	if err != nil {
		return nil, nil, fmt.Errorf("could not parse image manifest: %w", err)
	}
	if err := o.checkImageCompliance(manifest); err != nil {
		return nil, nil, err
	}
	layers, err := img.Layers()
	if err != nil {
		return nil, nil, fmt.Errorf("could not read image layers: %w", err)
	}
	// Index layers by digest so each manifest entry can be matched to its content.
	layerMap := map[string]v1.Layer{}
	for _, l := range layers {
		digest, err := l.Digest()
		if err != nil {
			return nil, nil, fmt.Errorf("failed to find digest for layer: %w", err)
		}
		layerMap[digest.String()] = l
	}
	for idx, l := range manifest.Layers {
		lKind := l.Annotations[KindAnnotation]
		lName := l.Annotations[TitleAnnotation]
		if kind == lKind && name == lName {
			obj, err := readTarLayer(layerMap[l.Digest.String()])
			if err != nil {
				// This could still be a raw layer so try to read it as that instead.
				// NOTE(review): the fallback indexes by manifest position
				// (layers[idx]) while the first attempt looks up by digest —
				// presumably img.Layers() preserves manifest order; confirm.
				obj, err := readRawLayer(layers[idx])
				return obj, nil, err
			}
			return obj, nil, nil
		}
	}
	return nil, nil, fmt.Errorf("could not find object in image with kind: %s and name: %s", kind, name)
}
// retrieveImage fetches the bundle image's contents and manifest from the
// remote registry, authenticating with the resolver's keychain and honoring
// the supplied context.
func (o *Resolver) retrieveImage(ctx context.Context) (v1.Image, error) {
	ref, err := imgname.ParseReference(o.imageReference)
	if err != nil {
		return nil, fmt.Errorf("%s is an unparseable image reference: %w", o.imageReference, err)
	}
	opts := []ociremote.Option{
		ociremote.WithAuthFromKeychain(o.keychain),
		ociremote.WithContext(ctx),
	}
	return ociremote.Image(ref, opts...)
}
// checkImageCompliance will perform common checks to ensure the Tekton Bundle is compliant to our spec.
func (o *Resolver) checkImageCompliance(manifest *v1.Manifest) error {
	// Check the manifest's layers to ensure there are no more than
	// MaximumBundleObjects of them (one object per layer). The previous
	// comment claimed a limit of 10, which was stale.
	if len(manifest.Layers) > MaximumBundleObjects {
		return fmt.Errorf("bundle %s contained more than the maximum %d allowed objects", o.imageReference, MaximumBundleObjects)
	}
	// Ensure each layer complies to the spec: it must carry apiVersion, title
	// and a lowercase, singular kind annotation.
	for _, l := range manifest.Layers {
		refDigest := fmt.Sprintf("%s:%s", o.imageReference, l.Digest.String())
		if _, ok := l.Annotations[APIVersionAnnotation]; !ok {
			return fmt.Errorf("invalid tekton bundle: %s does not contain a %s annotation", refDigest, APIVersionAnnotation)
		}
		if _, ok := l.Annotations[TitleAnnotation]; !ok {
			return fmt.Errorf("invalid tekton bundle: %s does not contain a %s annotation", refDigest, TitleAnnotation)
		}
		kind, ok := l.Annotations[KindAnnotation]
		if !ok {
			return fmt.Errorf("invalid tekton bundle: %s does not contain a %s annotation", refDigest, KindAnnotation)
		}
		if strings.TrimSuffix(strings.ToLower(kind), "s") != kind {
			return fmt.Errorf("invalid tekton bundle: %s annotation for %s must be lowercased and singular, found %s", KindAnnotation, refDigest, kind)
		}
	}
	return nil
}
// readTarLayer reads out the contents of an image layer, assumed to be a
// tarball containing exactly one file, and decodes that file as a parsed
// Tekton resource.
func readTarLayer(layer v1.Layer) (runtime.Object, error) {
	rc, err := layer.Uncompressed()
	if err != nil {
		return nil, fmt.Errorf("failed to read image layer: %w", err)
	}
	defer rc.Close()
	// If the user bundled this up as a tar file then we need to untar it.
	treader := tar.NewReader(rc)
	header, err := treader.Next()
	if err != nil {
		return nil, errors.New("layer is not a tarball")
	}
	// We only allow 1 resource per layer so this tar bundle should have one
	// and only one file. Use io.ReadFull: a single call to tar.Reader.Read may
	// return fewer than header.Size bytes (a short read), which previously
	// left contents silently truncated before decoding.
	contents := make([]byte, header.Size)
	if _, err := io.ReadFull(treader, contents); err != nil {
		return nil, fmt.Errorf("failed to read tar bundle: %w", err)
	}
	obj, _, err := scheme.Codecs.UniversalDeserializer().Decode(contents, nil, nil)
	return obj, err
}
// readRawLayer reads out the contents of an image layer, assumed to be raw
// (non-tar) bytes, and decodes them as a parsed Tekton resource.
func readRawLayer(layer v1.Layer) (runtime.Object, error) {
	rc, err := layer.Uncompressed()
	if err != nil {
		return nil, fmt.Errorf("failed to read image layer: %w", err)
	}
	defer rc.Close()
	raw, readErr := io.ReadAll(rc)
	if readErr != nil {
		return nil, fmt.Errorf("could not read contents of image layer: %w", readErr)
	}
	obj, _, decodeErr := scheme.Codecs.UniversalDeserializer().Decode(raw, nil, nil)
	return obj, decodeErr
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolution
import (
"errors"
"fmt"
)
// ErrNilResource is returned when remote resolution
// appears to have succeeded but the resolved resource is nil.
// Callers should compare against it with errors.Is.
var ErrNilResource = errors.New("unknown error occurred: requested resource is nil")
// InvalidRuntimeObjectError is returned when remote resolution
// succeeded but the returned data is not a valid runtime.Object.
type InvalidRuntimeObjectError struct {
Original error
}
var _ error = &InvalidRuntimeObjectError{}
// Error returns the string representation of this error.
func (e *InvalidRuntimeObjectError) Error() string {
return fmt.Sprintf("invalid runtime object: %v", e.Original)
}
// Unwrap returns the underlying original error.
func (e *InvalidRuntimeObjectError) Unwrap() error {
return e.Original
}
// Is returns true if the given error coerces into an error of this type.
func (e *InvalidRuntimeObjectError) Is(that error) bool {
return errors.As(that, &e)
}
// DataAccessError is returned when remote resolution succeeded but
// attempting to access the resolved data failed. An example of this
// type of error would be if a ResolutionRequest contained malformed base64.
type DataAccessError struct {
Original error
}
var _ error = &DataAccessError{}
// Error returns the string representation of this error.
func (e *DataAccessError) Error() string {
return fmt.Sprintf("error accessing data from remote resource: %v", e.Original)
}
// Unwrap returns the underlying original error.
func (e *DataAccessError) Unwrap() error {
return e.Original
}
// Is returns true if the given error coerces into an error of this type.
func (e *DataAccessError) Is(that error) bool {
return errors.As(that, &e)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolution
import (
resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/kmeta"
)
// Compile-time checks that resolutionRequest satisfies the resolution
// framework's Request and OwnedRequest interfaces.
var _ resolution.Request = &resolutionRequest{}
var _ resolution.OwnedRequest = &resolutionRequest{}

// resolutionRequest couples a framework resolution request with the
// object that owns it.
type resolutionRequest struct {
	resolution.Request
	owner kmeta.OwnerRefable // owning object used to build the owner reference
}

// OwnerRef returns a controller OwnerReference pointing at the owning object.
func (req *resolutionRequest) OwnerRef() metav1.OwnerReference {
	return *kmeta.NewControllerRef(req.owner)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolution
import (
"context"
"errors"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
"github.com/tektoncd/pipeline/pkg/remote"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
remoteresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"knative.dev/pkg/kmeta"
)
// Resolver implements remote.Resolver and encapsulates the majority of
// code required to interface with the tektoncd/resolution project. It
// is used to make async requests for resources like pipelines from
// remote places like git repos.
type Resolver struct {
	requester remoteresource.Requester // submits resolution requests and awaits results
	owner kmeta.OwnerRefable // object that owns the requests (used for owner references)
	resolverName string // name of the remote resolver to submit to
	params v1.Params // parameters forwarded to the resolver
	targetName string // name passed to GetNameAndNamespace when building requests
	targetNamespace string // namespace passed to GetNameAndNamespace when building requests
}

// Compile-time check that Resolver satisfies remote.Resolver.
var _ remote.Resolver = &Resolver{}
// NewResolver returns an implementation of remote.Resolver capable
// of performing asynchronous remote resolution.
func NewResolver(requester remoteresource.Requester, owner kmeta.OwnerRefable, resolverName string, targetName string, targetNamespace string, params v1.Params) remote.Resolver {
	r := &Resolver{}
	r.requester = requester
	r.owner = owner
	r.resolverName = resolverName
	r.params = params
	r.targetName = targetName
	r.targetNamespace = targetNamespace
	return r
}
// Get implements remote.Resolver. The kind/name arguments are ignored:
// everything needed to resolve the resource was captured at construction.
func (resolver *Resolver) Get(ctx context.Context, _, _ string) (runtime.Object, *v1.RefSource, error) {
	req, err := buildRequest(resolver.resolverName, resolver.owner, resolver.targetName, resolver.targetNamespace, resolver.params)
	if err != nil {
		return nil, nil, fmt.Errorf("error building request for remote resource: %w", err)
	}
	resolved, submitErr := resolver.requester.Submit(ctx, remoteresource.ResolverName(resolver.resolverName), req)
	return ResolvedRequest(resolved, submitErr)
}
// List implements remote.Resolver but is unused for remote resolution.
// It always returns an empty result and no error.
func (resolver *Resolver) List(_ context.Context) ([]remote.ResolvedObject, error) {
	return nil, nil
}
// buildRequest assembles a resolutionRequest for the named resolver, deriving
// the request's name/namespace from the owner and the supplied target values.
func buildRequest(resolverName string, owner kmeta.OwnerRefable, name string, namespace string, params v1.Params) (*resolutionRequest, error) {
	spec := &v1beta1.ResolutionRequestSpec{Params: params}
	resolvedName, resolvedNamespace, err := remoteresource.GetNameAndNamespace(resolverName, owner, name, namespace, spec)
	if err != nil {
		return nil, err
	}
	return &resolutionRequest{
		Request: remoteresource.NewRequest(resolvedName, resolvedNamespace, params),
		owner:   owner,
	}, nil
}
// ResolvedRequest turns the outcome of a resolution request into a decoded
// runtime object plus its provenance, translating resolution-layer errors
// into this package's and the remote package's error types.
func ResolvedRequest(resolved resolutioncommon.ResolvedResource, err error) (runtime.Object, *v1.RefSource, error) {
	if errors.Is(err, resolutioncommon.ErrRequestInProgress) {
		return nil, nil, remote.ErrRequestInProgress
	}
	if err != nil {
		return nil, nil, fmt.Errorf("error requesting remote resource: %w", err)
	}
	if resolved == nil {
		return nil, nil, ErrNilResource
	}
	data, dataErr := resolved.Data()
	if dataErr != nil {
		return nil, nil, &DataAccessError{Original: dataErr}
	}
	// Strict decoding rejects resolved content that is not a valid object.
	codecs := serializer.NewCodecFactory(scheme.Scheme, serializer.EnableStrict)
	obj, _, decodeErr := codecs.UniversalDeserializer().Decode(data, nil, nil)
	if decodeErr != nil {
		return nil, nil, &InvalidRuntimeObjectError{Original: decodeErr}
	}
	return obj, resolved.RefSource(), nil
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolution
import (
resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/kmeta"
)
// Compile-time checks that resolutionRequest satisfies the remote-resolution
// framework's Request and OwnedRequest interfaces.
var _ resolution.Request = &resolutionRequest{}
var _ resolution.OwnedRequest = &resolutionRequest{}

// resolutionRequest couples a framework resolution request with the
// object that owns it.
type resolutionRequest struct {
	resolution.Request
	owner kmeta.OwnerRefable // owning object used to build the owner reference
}

// OwnerRef returns a controller OwnerReference pointing at the owning object.
func (req *resolutionRequest) OwnerRef() metav1.OwnerReference {
	return *kmeta.NewControllerRef(req.owner)
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resolution
import (
"context"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/remote"
resolution "github.com/tektoncd/pipeline/pkg/remote/resolution"
remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
resource "github.com/tektoncd/pipeline/pkg/resolution/resource"
"k8s.io/apimachinery/pkg/runtime"
"knative.dev/pkg/kmeta"
)
// Compile-time check that Resolver satisfies remote.Resolver.
var _ remote.Resolver = (*Resolver)(nil)

// Resolver implements remote.Resolver and encapsulates the majority of
// code required to interface with the tektoncd/resolution project. It
// is used to make async requests for resources like pipelines from
// remote places like git repos.
type Resolver struct {
	requester remoteresource.Requester // submits resolution requests and awaits results
	owner kmeta.OwnerRefable // object that owns the requests (used for owner references)
	resolverName string // name of the remote resolver to submit to
	resolverPayload remoteresource.ResolverPayload // target name/namespace plus resolution spec
}
// NewResolver returns an implementation of remote.Resolver capable
// of performing asynchronous remote resolution.
func NewResolver(requester remoteresource.Requester, owner kmeta.OwnerRefable, resolverName string, resolverPayload remoteresource.ResolverPayload) remote.Resolver {
	r := Resolver{}
	r.requester = requester
	r.owner = owner
	r.resolverName = resolverName
	r.resolverPayload = resolverPayload
	return &r
}
// Get implements remote.Resolver. The kind and name arguments are unused:
// everything needed for resolution was captured at construction time.
func (resolver *Resolver) Get(ctx context.Context, _, _ string) (runtime.Object, *v1.RefSource, error) {
	req, err := buildRequest(resolver.resolverName, resolver.owner, &resolver.resolverPayload)
	if err != nil {
		return nil, nil, fmt.Errorf("error building request for remote resource: %w", err)
	}
	name := remoteresource.ResolverName(resolver.resolverName)
	resolved, submitErr := resolver.requester.Submit(ctx, name, req)
	return resolution.ResolvedRequest(resolved, submitErr)
}
// List implements remote.Resolver but is unused for remote resolution.
// It always returns an empty result and no error.
func (resolver *Resolver) List(_ context.Context) ([]remote.ResolvedObject, error) {
	return nil, nil
}
// buildRequest assembles a resolutionRequest from the resolver payload,
// deriving a deterministic name/namespace for it from the owner.
// Note: the payload's Name/Namespace fields are updated in place with the
// derived values (pre-existing behavior preserved for callers).
func buildRequest(resolverName string, owner kmeta.OwnerRefable, resolverPayload *remoteresource.ResolverPayload) (*resolutionRequest, error) {
	// Guard against a nil payload up front. The previous version checked for
	// nil when reading fields but then unconditionally dereferenced the
	// pointer (resolverPayload.Name = ... and *resolverPayload), which would
	// panic on a nil payload.
	if resolverPayload == nil {
		resolverPayload = &remoteresource.ResolverPayload{}
	}
	name := resolverPayload.Name
	namespace := resolverPayload.Namespace
	var url string
	var params v1.Params
	if resolverPayload.ResolutionSpec != nil {
		params = resolverPayload.ResolutionSpec.Params
		url = resolverPayload.ResolutionSpec.URL
	}
	rr := &v1beta1.ResolutionRequestSpec{
		Params: params,
		URL:    url,
	}
	name, namespace, err := resource.GetNameAndNamespace(resolverName, owner, name, namespace, rr)
	if err != nil {
		return nil, err
	}
	resolverPayload.Name = name
	resolverPayload.Namespace = namespace
	req := &resolutionRequest{
		Request: remoteresource.NewRequest(*resolverPayload),
		owner:   owner,
	}
	return req, nil
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bundle
import (
"context"
"errors"
"strings"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
bundleresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"k8s.io/client-go/kubernetes"
kubeclient "knative.dev/pkg/client/injection/kube/client"
)
const (
	// LabelValueBundleResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueBundleResolverType = "bundles"
	// BundleResolverName is the name that the bundle resolver should be associated with.
	BundleResolverName = "bundleresolver"
)
// Compile-time interface conformance checks.
var _ framework.Resolver = (*Resolver)(nil)
var _ resolutionframework.ConfigWatcher = (*Resolver)(nil)
var _ cache.ImmutabilityChecker = (*Resolver)(nil)

// Resolver implements a framework.Resolver that can fetch files from OCI bundles.
type Resolver struct {
	kubeClientSet kubernetes.Interface // kube client handed to the resolve function
	// resolveRequestFunc performs the actual bundle resolution. It defaults to
	// bundleresolution.ResolveRequest in Initialize when unset — presumably
	// the indirection exists so tests can inject a fake; confirm with callers.
	resolveRequestFunc func(context.Context, kubernetes.Interface, *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error)
}
// Initialize sets up any dependencies needed by the Resolver: the kube client
// from the injection context and, when not already set, the default resolve
// function. Always returns nil.
func (r *Resolver) Initialize(ctx context.Context) error {
	r.kubeClientSet = kubeclient.Get(ctx)
	// Preserve a pre-set resolveRequestFunc; only fill in the default.
	if r.resolveRequestFunc == nil {
		r.resolveRequestFunc = bundleresolution.ResolveRequest
	}
	return nil
}
// GetName returns a string name to refer to this Resolver by.
func (r *Resolver) GetName(_ context.Context) string {
	return BundleResolverName
}

// GetSelector returns a map of labels to match against tasks requesting
// resolution from this Resolver.
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
	return map[string]string{
		resolutioncommon.LabelKeyResolverType: LabelValueBundleResolverType,
	}
}

// GetConfigName returns the name of the bundle resolver's configmap.
func (r *Resolver) GetConfigName(_ context.Context) string {
	return bundleresolution.ConfigMapName
}

// Validate ensures parameters from a request are as expected.
// Validation is delegated to the underlying bundle resolution package.
func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
	return bundleresolution.ValidateParams(ctx, req.Params)
}
// IsImmutable implements ImmutabilityChecker.IsImmutable
// Returns true if the bundle parameter contains a digest reference (@sha256:...)
func (r *Resolver) IsImmutable(params []v1.Param) bool {
	// Locate the bundle reference among the request parameters; only the
	// first matching parameter is considered.
	for _, param := range params {
		if param.Name != bundleresolution.ParamBundle {
			continue
		}
		// A pull spec pinned by SHA-256 digest is immutable. Examples:
		// - image@sha256:abc123...
		// - registry.io/image@sha256:abc123...
		// - registry.io/image:tag@sha256:abc123... (tag is ignored when digest is present)
		return strings.Contains(param.Value.StringVal, "@sha256:")
	}
	// No bundle parameter found: nothing immutable to point at.
	return false
}
// Resolve uses the given params to resolve the requested file or resource,
// consulting the shared resolver cache when it is enabled for this request.
func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
	if len(req.Params) == 0 {
		return nil, errors.New("no params")
	}
	resolve := func() (resolutionframework.ResolvedResource, error) {
		return r.resolveRequestFunc(ctx, r.kubeClientSet, req)
	}
	if !cache.ShouldUse(ctx, r, req.Params, LabelValueBundleResolverType) {
		return resolve()
	}
	return cache.GetFromCacheOrResolve(ctx, req.Params, LabelValueBundleResolverType, resolve)
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
clusterresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
const (
	// LabelValueClusterResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueClusterResolverType = "cluster"
	// ClusterResolverName is the name that the cluster resolver should be
	// associated with. NOTE(review): capitalized unlike the bundle resolver's
	// "bundleresolver"; the value is part of the external contract, so it is
	// intentionally left unchanged.
	ClusterResolverName = "Cluster"
)
// Compile-time interface conformance checks.
var _ framework.Resolver = (*Resolver)(nil)
var _ resolutionframework.ConfigWatcher = (*Resolver)(nil)
var _ cache.ImmutabilityChecker = (*Resolver)(nil)

// Resolver implements a framework.Resolver that can fetch resources from the same cluster.
type Resolver struct {
	pipelineClientSet versioned.Interface // Tekton client used to look up in-cluster resources
}
// Initialize sets up any dependencies needed by the Resolver: the pipeline
// client from the injection context. Always returns nil.
func (r *Resolver) Initialize(ctx context.Context) error {
	r.pipelineClientSet = pipelineclient.Get(ctx)
	return nil
}
// GetName returns a string name to refer to this Resolver by.
func (r *Resolver) GetName(_ context.Context) string {
	return ClusterResolverName
}

// GetSelector returns a map of labels to match against tasks requesting
// resolution from this Resolver.
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
	return map[string]string{
		resolutioncommon.LabelKeyResolverType: LabelValueClusterResolverType,
	}
}

// GetConfigName returns the name of the cluster resolver's configmap.
func (r *Resolver) GetConfigName(_ context.Context) string {
	return clusterresolution.ConfigMapName
}

// Validate ensures parameters from a request are as expected.
// Validation is delegated to the underlying cluster resolution package.
func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
	return clusterresolution.ValidateParams(ctx, req.Params)
}

// IsImmutable implements ImmutabilityChecker.IsImmutable
// Returns false because cluster resources don't have immutable references
func (r *Resolver) IsImmutable([]v1.Param) bool {
	// Cluster resources (Tasks, Pipelines, etc.) don't have immutable references
	// like Git commit hashes or bundle digests, so we always return false
	return false
}
// Resolve uses the given params to resolve the requested file or resource,
// consulting the shared resolver cache when it is enabled for this request.
func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
	resolve := func() (resolutionframework.ResolvedResource, error) {
		return clusterresolution.ResolveFromParams(ctx, req.Params, r.pipelineClientSet)
	}
	if !cache.ShouldUse(ctx, r, req.Params, LabelValueClusterResolverType) {
		return resolve()
	}
	return cache.GetFromCacheOrResolve(ctx, req.Params, LabelValueClusterResolverType, resolve)
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
// Annotation keys/values stamped onto resources that pass through the
// resolver cache, recording whether, when, and how they were cached.
const (
	// cacheAnnotationKey is the annotation key indicating if a resource was cached
	cacheAnnotationKey = "resolution.tekton.dev/cached"
	// cacheTimestampKey is the annotation key for when the resource was cached
	cacheTimestampKey = "resolution.tekton.dev/cache-timestamp"
	// cacheResolverTypeKey is the annotation key for the resolver type that cached it
	cacheResolverTypeKey = "resolution.tekton.dev/cache-resolver-type"
	// cacheOperationKey is the annotation key for the cache operation type
	cacheOperationKey = "resolution.tekton.dev/cache-operation"
	// cacheValueTrue is the value used for cache annotations
	cacheValueTrue = "true"
	// cacheOperationStore is the value for cache store operations
	cacheOperationStore = "store"
	// cacheOperationRetrieve is the value for cache retrieve operations
	cacheOperationRetrieve = "retrieve"
)
// annotatedResource wraps a ResolvedResource with cache annotations
// describing how and when the wrapped resource interacted with the cache.
type annotatedResource struct {
	resource resolutionframework.ResolvedResource // wrapped resource; Data/RefSource delegate to it
	annotations map[string]string // wrapped resource's annotations merged with cache metadata
}
// newAnnotatedResource wraps resource with cache-metadata annotations: the
// cached flag, the timestamp, the resolver type, and the cache operation.
func newAnnotatedResource(
	resource resolutionframework.ResolvedResource,
	resolverType,
	operation string,
	timestamp string,
) *annotatedResource {
	// Merge into a fresh map rather than mutating the wrapped resource's map:
	// the same resource may be annotated concurrently from several goroutines.
	existing := resource.Annotations()
	merged := make(map[string]string, len(existing)+4)
	for key, value := range existing {
		merged[key] = value
	}
	merged[cacheAnnotationKey] = cacheValueTrue
	merged[cacheTimestampKey] = timestamp
	merged[cacheResolverTypeKey] = resolverType
	merged[cacheOperationKey] = operation
	return &annotatedResource{resource: resource, annotations: merged}
}
// Data returns the bytes of the wrapped resource, unchanged.
func (a *annotatedResource) Data() []byte {
	return a.resource.Data()
}

// Annotations returns the wrapped resource's annotations merged with the
// cache metadata added at construction time.
func (a *annotatedResource) Annotations() map[string]string {
	return a.annotations
}

// RefSource returns the source reference of the remote data, delegating to
// the wrapped resource.
func (a *annotatedResource) RefSource() *v1.RefSource {
	return a.resource.RefSource()
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"crypto/sha256"
"encoding/hex"
"sort"
"time"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"go.uber.org/zap"
utilcache "k8s.io/apimachinery/pkg/util/cache"
)
// Compile-time assertion that resolverCache satisfies
// resolutionframework.ConfigWatcher.
var _ resolutionframework.ConfigWatcher = (*resolverCache)(nil)

// resolverCache is a wrapper around utilcache.LRUExpireCache that provides
// type-safe methods for caching resolver results.
type resolverCache struct {
	// cache is the underlying LRU cache with per-entry expiration.
	cache *utilcache.LRUExpireCache
	// logger is optional; when nil, logging is a no-op (see infow).
	logger *zap.SugaredLogger
	// ttl is the time-to-live applied to every entry on Add.
	ttl time.Duration
	// maxSize is the maximum number of entries the LRU holds.
	maxSize int
	// clock supplies timestamps and entry expiry; injectable for tests.
	clock utilcache.Clock
}
// newResolverCache returns a resolverCache of the given size and TTL backed
// by the real system clock.
func newResolverCache(maxSize int, ttl time.Duration) *resolverCache {
	return newResolverCacheWithClock(maxSize, ttl, realClock{})
}
// newResolverCacheWithClock builds a resolverCache using the supplied clock,
// which lets tests control entry expiry deterministically.
func newResolverCacheWithClock(maxSize int, ttl time.Duration, clock utilcache.Clock) *resolverCache {
	rc := &resolverCache{
		clock:   clock,
		maxSize: maxSize,
		ttl:     ttl,
	}
	rc.cache = utilcache.NewLRUExpireCacheWithClock(maxSize, clock)
	return rc
}
// GetConfigName returns the name of the cache's configmap. It satisfies the
// resolutionframework.ConfigWatcher interface asserted above.
func (c *resolverCache) GetConfigName(_ context.Context) string {
	return getCacheConfigName()
}
// withLogger returns a shallow copy of the cache that logs through the given
// logger. All shared state (LRU store, ttl, maxSize, clock) is reused; only
// the logger differs, which keeps request-scoped loggers out of the
// process-wide singleton.
func (c *resolverCache) withLogger(logger *zap.SugaredLogger) *resolverCache {
	clone := *c
	clone.logger = logger
	return &clone
}
// TTL returns the time-to-live duration applied to cache entries on Add.
func (c *resolverCache) TTL() time.Duration {
	return c.ttl
}
// MaxSize returns the maximum number of entries the cache can hold.
func (c *resolverCache) MaxSize() int {
	return c.maxSize
}
// Get retrieves a cached resource by resolver type and parameters, returning
// the resource and whether it was found. Hits are wrapped with "retrieve"
// cache annotations stamped at the current clock time.
func (c *resolverCache) Get(resolverType string, params []pipelinev1.Param) (resolutionframework.ResolvedResource, bool) {
	key := generateCacheKey(resolverType, params)
	raw, ok := c.cache.Get(key)
	if !ok {
		c.infow("Cache miss", "key", key)
		return nil, false
	}
	cached, isResource := raw.(resolutionframework.ResolvedResource)
	if !isResource {
		// A non-resource value under our key is unexpected; treat it as a miss.
		c.infow("Failed casting cached resource", "key", key)
		return nil, false
	}
	c.infow("Cache hit", "key", key)
	ts := c.clock.Now().Format(time.RFC3339)
	return newAnnotatedResource(cached, resolverType, cacheOperationRetrieve, ts), true
}
// infow logs at info level when a logger is configured and is a no-op
// otherwise, so call sites never need nil checks.
func (c *resolverCache) infow(msg string, keysAndValues ...any) {
	if c.logger != nil {
		c.logger.Infow(msg, keysAndValues...)
	}
}
// Add stores a resource in the cache with the configured TTL and returns an
// annotated version of the resource carrying "store" cache metadata.
func (c *resolverCache) Add(
	resolverType string,
	params []pipelinev1.Param,
	resource resolutionframework.ResolvedResource,
) resolutionframework.ResolvedResource {
	key := generateCacheKey(resolverType, params)
	c.infow("Adding to cache", "key", key, "expiration", c.ttl)
	// The annotated wrapper is what gets cached, so later Gets see the
	// store metadata too.
	wrapped := newAnnotatedResource(resource, resolverType, cacheOperationStore, c.clock.Now().Format(time.RFC3339))
	c.cache.Add(key, wrapped, c.ttl)
	return wrapped
}
// Remove deletes a cached resource identified by resolver type and parameters.
func (c *resolverCache) Remove(resolverType string, params []pipelinev1.Param) {
	key := generateCacheKey(resolverType, params)
	c.infow("Removing from cache", "key", key)
	c.cache.Remove(key)
}
// Clear removes all entries from the cache.
func (c *resolverCache) Clear() {
	c.infow("Clearing all cache entries")
	// RemoveAll deletes every entry whose value matches the predicate;
	// an always-true predicate clears the whole cache.
	c.cache.RemoveAll(func(_ any) bool { return true })
}
// generateCacheKey derives a deterministic, fixed-length cache key for a
// (resolverType, params) pair: it builds a canonical byte representation of
// the parameters (cache-control param removed, params and their array/object
// values sorted) and returns its hex-encoded SHA-256.
func generateCacheKey(resolverType string, params []pipelinev1.Param) string {
	// Filter out the 'cache' control parameter: it selects caching behavior
	// and must not affect the identity of the cached resource.
	filteredParams := make([]pipelinev1.Param, 0, len(params))
	for _, p := range params {
		if p.Name != CacheParam {
			filteredParams = append(filteredParams, p)
		}
	}
	// Sort params by name to ensure deterministic ordering.
	sort.Slice(filteredParams, func(i, j int) bool {
		return filteredParams[i].Name < filteredParams[j].Name
	})
	// Accumulate into a single growing byte buffer rather than repeated
	// string concatenation, which allocates a new string per append
	// (quadratic in the number of parameters/values).
	buf := make([]byte, 0, 256)
	buf = append(buf, resolverType...)
	buf = append(buf, ':')
	for _, p := range filteredParams {
		buf = append(buf, p.Name...)
		buf = append(buf, '=')
		switch p.Value.Type {
		case pipelinev1.ParamTypeString:
			buf = append(buf, p.Value.StringVal...)
		case pipelinev1.ParamTypeArray:
			// Sort array values for determinism.
			arrayVals := make([]string, len(p.Value.ArrayVal))
			copy(arrayVals, p.Value.ArrayVal)
			sort.Strings(arrayVals)
			for i, val := range arrayVals {
				if i > 0 {
					buf = append(buf, ',')
				}
				buf = append(buf, val...)
			}
		case pipelinev1.ParamTypeObject:
			// Sort object keys for determinism.
			keys := make([]string, 0, len(p.Value.ObjectVal))
			for k := range p.Value.ObjectVal {
				keys = append(keys, k)
			}
			sort.Strings(keys)
			for i, key := range keys {
				if i > 0 {
					buf = append(buf, ',')
				}
				buf = append(buf, key...)
				buf = append(buf, ':')
				buf = append(buf, p.Value.ObjectVal[key]...)
			}
		default:
			// For unknown types, use StringVal as fallback.
			buf = append(buf, p.Value.StringVal...)
		}
		buf = append(buf, ';')
	}
	// Hashing keeps keys fixed-length and avoids leaking parameter values
	// into cache keys. The byte sequence hashed here is identical to the
	// previous string-based construction.
	hash := sha256.Sum256(buf)
	return hex.EncodeToString(hash[:])
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import "time"
// realClock implements Clock using the actual system time.
type realClock struct{}

// Now returns the current system time.
func (realClock) Now() time.Time {
	return time.Now()
}
// fakeClock implements Clock with a controllable time for testing.
type fakeClock struct {
now time.Time
}
// Now returns the current time of the fake clock.
func (f *fakeClock) Now() time.Time {
return f.now
}
// Advance moves the fake clock forward by the given duration.
func (f *fakeClock) Advance(d time.Duration) {
f.now = f.now.Add(d)
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"os"
"strconv"
"sync"
"time"
corev1 "k8s.io/api/core/v1"
"knative.dev/pkg/configmap"
)
const (
	// resolverCacheConfigMapNameEnv env var overwrites the cache ConfigMap name;
	// defaults to "resolver-cache-config".
	resolverCacheConfigMapNameEnv = "RESOLVER_CACHE_CONFIG_MAP_NAME"
	// defaultConfigMapName is the default name of the ConfigMap that configures
	// resolver cache settings; it contains max-size and ttl configuration for
	// the shared resolver cache.
	defaultConfigMapName = "resolver-cache-config"
	// maxSizeConfigMapKey is the ConfigMap key holding the cache's max entry count.
	maxSizeConfigMapKey = "max-size"
	// ttlConfigMapKey is the ConfigMap key holding the per-entry time-to-live.
	ttlConfigMapKey = "ttl"
	// defaultCacheSize is used when max-size is absent or unparseable.
	defaultCacheSize = 1000
	// defaultExpiration is used when ttl is absent or unparseable.
	defaultExpiration = 5 * time.Minute
)
var (
	// cacheMu guards creation/replacement of the shared resolver cache.
	cacheMu sync.Mutex
	// startWatchingOnce ensures ConfigMap watching is wired at most once per process.
	startWatchingOnce sync.Once
)
// cacheConfigKey is the context key under which the cache Config is stored
// (see CacheConfigStore.ToContext).
type cacheConfigKey struct{}

// Config holds the configuration for the resolver cache.
type Config struct {
	// MaxSize is the maximum number of entries the cache may hold.
	MaxSize int
	// TTL is how long each cache entry remains valid after being added.
	TTL time.Duration
}
// CacheConfigStore wraps an UntypedStore that watches the resolver cache
// ConfigMap and exposes typed access to the current Config.
type CacheConfigStore struct {
	// untyped watches and parses the cache ConfigMap.
	untyped *configmap.UntypedStore
	// cacheConfigName is the ConfigMap name loaded back out of the store.
	cacheConfigName string
}
// NewCacheConfigStore returns a CacheConfigStore watching cacheConfigName.
// The ConfigMap constructor is registered under the caller-supplied name —
// the same name GetResolverConfig later loads — so a name other than the
// default still resolves. (Previously the constructor was registered under
// getCacheConfigName(), which could diverge from cacheConfigName and make
// GetResolverConfig silently fall back to defaults.)
func NewCacheConfigStore(cacheConfigName string, logger configmap.Logger) *CacheConfigStore {
	return &CacheConfigStore{
		cacheConfigName: cacheConfigName,
		untyped: configmap.NewUntypedStore(
			cacheConfigName,
			logger,
			configmap.Constructors{
				cacheConfigName: NewConfigFromConfigMap,
			},
			onCacheConfigChanged,
		),
	}
}
// WatchConfigs registers this store's ConfigMap watchers with w.
// NOTE(review): registration is guarded by a package-level sync.Once, so only
// the first CacheConfigStore to call WatchConfigs in the process is ever
// wired up; later stores are silently ignored — confirm a single store per
// process is intended.
func (store *CacheConfigStore) WatchConfigs(w configmap.Watcher) {
	startWatchingOnce.Do(func() {
		store.untyped.WatchConfigs(w)
	})
}
// GetResolverConfig returns the currently loaded cache configuration, or the
// built-in defaults when the ConfigMap has not been observed yet.
func (store *CacheConfigStore) GetResolverConfig() *Config {
	if conf, ok := store.untyped.UntypedLoad(store.cacheConfigName).(*Config); ok {
		return conf
	}
	// Nothing loaded (or unexpected type): fall back to defaults.
	return &Config{
		MaxSize: defaultCacheSize,
		TTL:     defaultExpiration,
	}
}
// ToContext returns a new context with the cache's configuration
// data stored in it under cacheConfigKey.
func (store *CacheConfigStore) ToContext(ctx context.Context) context.Context {
	return context.WithValue(ctx, cacheConfigKey{}, store.GetResolverConfig())
}
// getCacheConfigName returns the name of the cache configuration ConfigMap.
// It can be overridden via the RESOLVER_CACHE_CONFIG_MAP_NAME environment
// variable; otherwise the default name is used.
func getCacheConfigName() string {
	name := os.Getenv(resolverCacheConfigMapNameEnv)
	if name == "" {
		return defaultConfigMapName
	}
	return name
}
// NewConfigFromConfigMap creates a Config from a ConfigMap. A nil ConfigMap
// yields all defaults; keys that are missing, unparseable, or non-positive
// leave the corresponding default in place (best-effort parsing, never an
// error for bad values).
func NewConfigFromConfigMap(cm *corev1.ConfigMap) (*Config, error) {
	config := &Config{
		MaxSize: defaultCacheSize,
		TTL:     defaultExpiration,
	}
	if cm == nil {
		return config, nil
	}
	if raw, ok := cm.Data[maxSizeConfigMapKey]; ok {
		if size, err := strconv.Atoi(raw); err == nil && size > 0 {
			config.MaxSize = size
		}
	}
	if raw, ok := cm.Data[ttlConfigMapKey]; ok {
		if ttl, err := time.ParseDuration(raw); err == nil && ttl > 0 {
			config.TTL = ttl
		}
	}
	return config, nil
}
// onCacheConfigChanged is invoked by the UntypedStore whenever the cache
// ConfigMap changes. It replaces the process-wide sharedCache with a new one
// sized per the updated config; previously cached entries are discarded.
func onCacheConfigChanged(_ string, value any) {
	config, ok := value.(*Config)
	if !ok {
		// Ignore unexpected payload types rather than crash the watcher.
		return
	}
	cacheMu.Lock()
	defer cacheMu.Unlock()
	sharedCache = newResolverCache(config.MaxSize, config.TTL)
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"slices"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
const (
	// cacheModeAlways forces caching regardless of immutability.
	cacheModeAlways = "always"
	// cacheModeNever disables caching entirely.
	cacheModeNever = "never"
	// cacheModeAuto caches only when the resolver reports the reference immutable.
	cacheModeAuto = "auto"
	// CacheParam is the request parameter used to select the cache mode.
	CacheParam = "cache"
	// defaultCacheModeConfigMapKey is the per-resolver ConfigMap key that
	// overrides the system default cache mode.
	defaultCacheModeConfigMapKey = "default-cache-mode"
)
// ImmutabilityChecker extends the base Resolver interface with cache-specific methods.
// Each resolver implements IsImmutable to define what "auto" mode means in their context.
type ImmutabilityChecker interface {
	// IsImmutable reports whether the reference described by params cannot
	// change, making it safe to cache within the configured TTL.
	IsImmutable(params []v1.Param) bool
}
// ShouldUse determines whether caching should be used based on:
// 1. Task/Pipeline cache parameter (highest priority)
// 2. ConfigMap default-cache-mode (middle priority)
// 3. System default for resolver type (lowest priority)
//
// NOTE(review): resolverType is currently unused; it is kept for call-site
// compatibility.
func ShouldUse(
	ctx context.Context,
	resolver ImmutabilityChecker,
	params []v1.Param,
	resolverType string,
) bool {
	// Highest priority: an explicit cache parameter on the request.
	var mode string
	for _, param := range params {
		if param.Name == CacheParam {
			mode = param.Value.StringVal
			break
		}
	}
	// Middle priority: a default-cache-mode entry in the resolver's own
	// ConfigMap (e.g. bundleresolver-config, git-resolver-config,
	// cluster-resolver-config). Valid values: "always", "never", "auto".
	if mode == "" {
		conf := resolutionframework.GetResolverConfigFromContext(ctx)
		if defaultMode, ok := conf[defaultCacheModeConfigMapKey]; ok {
			mode = defaultMode
		}
	}
	// "always" and "never" are absolute; everything else — including the
	// empty string and "auto" — defers to the resolver's immutability check,
	// which is the system default behavior.
	switch mode {
	case cacheModeAlways:
		return true
	case cacheModeNever:
		return false
	default:
		return resolver.IsImmutable(params)
	}
}
// Validate returns an error if the cache mode is not "always", "never",
// "auto", or empty string (which defaults to auto).
func Validate(cacheMode string) error {
	// Empty string is valid - it will default to auto mode in ShouldUse.
	if cacheMode == "" {
		return nil
	}
	validCacheModes := []string{cacheModeAlways, cacheModeNever, cacheModeAuto}
	if slices.Contains(validCacheModes, cacheMode) {
		return nil
	}
	// %q quotes and escapes the user-supplied mode, so whitespace-only or
	// oddly-encoded values are visible in the error message.
	return fmt.Errorf("invalid cache mode %q, must be one of: %v (or empty for default)", cacheMode, validCacheModes)
}
// resolveFn produces a resolved resource on cache miss.
type resolveFn = func() (resolutionframework.ResolvedResource, error)

// GetFromCacheOrResolve returns the cached resource for (resolverType,
// params) when present. On a miss it invokes resolve, stores the result in
// the cache, and returns the stored (cache-annotated) resource; resolve
// errors are returned unchanged.
func GetFromCacheOrResolve(
	ctx context.Context,
	params []v1.Param,
	resolverType string,
	resolve resolveFn,
) (resolutionframework.ResolvedResource, error) {
	c := Get(ctx)
	if hit, ok := c.Get(resolverType, params); ok {
		return hit, nil
	}
	// Cache miss: perform the real resolution.
	resolved, err := resolve()
	if err != nil {
		return nil, err
	}
	// Add returns the resource wrapped with "store" annotations so callers
	// can tell it was cached.
	return c.Add(resolverType, params, resolved), nil
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"sync"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
var (
	// sharedCache is the process-wide resolver cache singleton. It is created
	// under cacheMu by createCacheOnce and may be replaced under the same
	// mutex by onCacheConfigChanged.
	sharedCache *resolverCache
	// cacheInitOnce guards the one-time initial creation of sharedCache.
	cacheInitOnce sync.Once
)

// resolverCacheKey is the context key under which the cache is stored.
type resolverCacheKey struct{}
// init registers the cache with the knative injection framework so every
// injection-based context automatically carries a logger-bound cache.
func init() {
	injection.Default.RegisterClient(addCacheWithLoggerToCtx)
}
// addCacheWithLoggerToCtx stores a logger-bound cache instance in ctx under
// resolverCacheKey. The *rest.Config is required by the injection client
// signature but unused here.
func addCacheWithLoggerToCtx(ctx context.Context, _ *rest.Config) context.Context {
	return context.WithValue(ctx, resolverCacheKey{}, createCacheOnce(ctx))
}
// createCacheOnce lazily initializes the process-wide shared cache exactly
// once, then returns a copy of it bound to the context's logger.
func createCacheOnce(ctx context.Context) *resolverCache {
	cacheInitOnce.Do(func() {
		cacheMu.Lock()
		defer cacheMu.Unlock()
		sharedCache = newResolverCache(defaultCacheSize, defaultExpiration)
	})
	// sharedCache may be swapped by onCacheConfigChanged, which writes it
	// under cacheMu; read it under the same mutex to avoid a data race
	// (previously this read was unsynchronized).
	cacheMu.Lock()
	c := sharedCache
	cacheMu.Unlock()
	return c.withLogger(
		logging.FromContext(ctx),
	)
}
// Get extracts the ResolverCache from the context.
// If the cache is not available in the context (e.g., in tests),
// it falls back to the shared cache with a logger from the context.
func Get(ctx context.Context) *resolverCache {
	if untyped := ctx.Value(resolverCacheKey{}); untyped != nil {
		// Only addCacheWithLoggerToCtx stores this key, so the assertion holds.
		return untyped.(*resolverCache)
	}
	return createCacheOnce(ctx)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"strings"
rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
rrinformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
rrcache "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache"
framework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
)
// ReconcilerModifier is a func that can access and modify a reconciler
// in the moments before a resolver is started. It allows for
// things like injecting a test clock (see applyModifiersAndDefaults).
type ReconcilerModifier = func(reconciler *Reconciler)
// NewController returns a knative controller for a Tekton Resolver.
// This sets up a lot of the boilerplate that individual resolvers
// shouldn't need to be concerned with since it's common to all of them.
func NewController(ctx context.Context, resolver Resolver, modifiers ...ReconcilerModifier) func(context.Context, configmap.Watcher) *controller.Impl {
	// Validate the selector eagerly so a misconfigured resolver fails at
	// startup rather than at reconcile time.
	if err := framework.ValidateResolver(ctx, resolver.GetSelector(ctx)); err != nil {
		panic(err.Error())
	}
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		logger := logging.FromContext(ctx)
		kubeclientset := kubeclient.Get(ctx)
		rrclientset := rrclient.Get(ctx)
		rrInformer := rrinformer.Get(ctx)
		if err := resolver.Initialize(ctx); err != nil {
			panic(err.Error())
		}
		r := &Reconciler{
			LeaderAwareFuncs:           framework.LeaderAwareFuncs(rrInformer.Lister()),
			kubeClientSet:              kubeclientset,
			resolutionRequestLister:    rrInformer.Lister(),
			resolutionRequestClientSet: rrclientset,
			resolver:                   resolver,
		}
		// Wire resolver-specific and shared-cache ConfigMap watchers before
		// the controller starts processing.
		watchConfigChanges(ctx, r, cmw)
		watchCacheConfigChanges(ctx, r, cmw)
		// TODO(sbwsg): Do better sanitize.
		resolverName := resolver.GetName(ctx)
		resolverName = strings.ReplaceAll(resolverName, "/", "")
		resolverName = strings.ReplaceAll(resolverName, " ", "")
		applyModifiersAndDefaults(ctx, r, modifiers)
		impl := controller.NewContext(ctx, r, controller.ControllerOptions{
			WorkQueueName: "TektonResolverFramework." + resolverName,
			Logger:        logger,
		})
		// Only enqueue requests matching this resolver's label selector.
		_, err := rrInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: framework.FilterResolutionRequestsBySelector(resolver.GetSelector(ctx)),
			Handler: cache.ResourceEventHandlerFuncs{
				AddFunc: impl.Enqueue,
				UpdateFunc: func(oldObj, newObj interface{}) {
					impl.Enqueue(newObj)
				},
				// TODO(sbwsg): should we deliver delete events
				// to the resolver?
				// DeleteFunc: impl.Enqueue,
			},
		})
		if err != nil {
			// %v rather than %w: Panicf formats via Sprintf, which does not
			// support the %w wrapping verb (it would print "%!w(...)").
			logging.FromContext(ctx).Panicf("Couldn't register ResolutionRequest informer event handler: %v", err)
		}
		return impl
	}
}
// watchConfigChanges binds a framework.Resolver to updates on its
// configmap, using knative's configmap helpers. This is only done if
// the resolver implements the framework.ConfigWatcher interface.
func watchConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configmap.Watcher) {
	if configWatcher, ok := reconciler.resolver.(framework.ConfigWatcher); ok {
		logger := logging.FromContext(ctx)
		resolverConfigName := configWatcher.GetConfigName(ctx)
		// An empty name is a programming error in the resolver; fail loudly.
		if resolverConfigName == "" {
			panic("resolver returned empty config name")
		}
		reconciler.configStore = framework.NewConfigStore(resolverConfigName, logger)
		reconciler.configStore.WatchConfigs(cmw)
	}
}
// watchCacheConfigChanges wires the shared resolver cache's ConfigMap
// watcher into cmw so cache size/TTL changes take effect at runtime. Unlike
// watchConfigChanges, an empty config name is logged rather than panicking.
// NOTE(review): the reconciler parameter is currently unused here.
func watchCacheConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configmap.Watcher) {
	logger := logging.FromContext(ctx)
	cacheInstance := rrcache.Get(ctx)
	cacheConfigName := cacheInstance.GetConfigName(ctx)
	if cacheConfigName == "" {
		logger.Error("failed to setup cache config watcher, cache returned empty config name")
		return
	}
	cacheConfigStore := rrcache.NewCacheConfigStore(cacheConfigName, logger)
	cacheConfigStore.WatchConfigs(cmw)
}
// applyModifiersAndDefaults applies the given modifiers to
// a reconciler and, after doing so, sets any default values for things
// that weren't set by a modifier.
func applyModifiersAndDefaults(ctx context.Context, r *Reconciler, modifiers []ReconcilerModifier) {
	for _, mod := range modifiers {
		mod(r)
	}
	// Default to the real clock unless a modifier (e.g. a test) injected one.
	if r.Clock == nil {
		r.Clock = clock.RealClock{}
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"time"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
// FakeUrl is the URL accepted by the fake resolver's URL-based mode
// (see Validate and Resolve).
const FakeUrl string = "fake://url"

// Compile-time check that FakeResolver implements the Resolver interface.
var _ Resolver = &FakeResolver{}

// FakeResolver implements a framework.Resolver that can fetch pre-configured strings based on a parameter value, or return
// resolution attempts with a configured error.
type FakeResolver framework.FakeResolver
// Initialize performs any setup required by the fake resolver. It ensures
// the ForParam map is non-nil so lookups and test seeding never hit a nil map.
func (r *FakeResolver) Initialize(ctx context.Context) error {
	if r.ForParam == nil {
		r.ForParam = make(map[string]*framework.FakeResolvedResource)
	}
	return nil
}
// GetName returns the string name that the fake resolver should be
// associated with.
func (r *FakeResolver) GetName(_ context.Context) string {
	return framework.FakeResolverName
}
// GetSelector returns the labels that resource requests are required to have for
// the fake resolver to process them.
func (r *FakeResolver) GetSelector(_ context.Context) map[string]string {
	return map[string]string{
		resolutioncommon.LabelKeyResolverType: framework.LabelValueFakeResolverType,
	}
}
// Validate returns an error if the given parameter map is not
// valid for a resource request targeting the fake resolver. When params are
// present they are validated as fake-resolver params; otherwise the request
// URL must exactly match FakeUrl.
func (r *FakeResolver) Validate(_ context.Context, req *v1beta1.ResolutionRequestSpec) error {
	if len(req.Params) > 0 {
		return framework.ValidateParams(req.Params)
	}
	if req.URL != FakeUrl {
		// Lowercase, colon-separated error text per Go convention (ST1005);
		// the previous message started with a capital and used periods.
		return fmt.Errorf("wrong url: expected %s, got %s", FakeUrl, req.URL)
	}
	return nil
}
// Resolve performs the work of fetching a file from the fake resolver given a map of
// parameters. Param-based requests delegate to the shared framework helper;
// URL-based requests are looked up directly in ForParam.
func (r *FakeResolver) Resolve(_ context.Context, req *v1beta1.ResolutionRequestSpec) (framework.ResolvedResource, error) {
	if len(req.Params) > 0 {
		return framework.Resolve(req.Params, r.ForParam)
	}
	resource, found := r.ForParam[req.URL]
	if !found {
		return nil, fmt.Errorf("couldn't find resource for url %s", req.URL)
	}
	return resource, nil
}
// Compile-time check that FakeResolver supports per-resolver timeouts.
var _ framework.TimedResolution = &FakeResolver{}

// GetResolutionTimeout returns the configured timeout for the reconciler, or the default time.Duration if not configured.
func (r *FakeResolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
	return framework.GetResolutionTimeout(r.Timeout, defaultTimeout), nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"time"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
rrv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
rrcache "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
"knative.dev/pkg/reconciler"
)
// defaultMaximumResolutionDuration is the maximum amount of time a call to
// Resolve() may take. It can be overridden by a resolver implementing
// the framework.TimedResolution interface.
const defaultMaximumResolutionDuration = time.Minute
// statusDataPatch is the json structure that will be PATCHed into
// a ResolutionRequest with its data and annotations once successfully
// resolved.
type statusDataPatch struct {
	// Annotations are copied from the resolved resource.
	Annotations map[string]string `json:"annotations"`
	// Data is the base64-encoded resolved content (see writeResolvedData).
	Data string `json:"data"`
	// Source carries the same value as RefSource, cast to the older
	// ConfigSource type (see writeResolvedData).
	Source *pipelinev1beta1.ConfigSource `json:"source"`
	// RefSource identifies where the remote data came from.
	RefSource *pipelinev1.RefSource `json:"refSource"`
}
// Reconciler handles ResolutionRequest objects, performs functionality
// common to all resolvers and delegates resolver-specific actions
// to its embedded type-specific Resolver object.
type Reconciler struct {
	// Implements reconciler.LeaderAware
	reconciler.LeaderAwareFuncs
	// Clock is used by the reconciler to track the passage of time
	// and can be overridden for tests.
	Clock clock.PassiveClock
	// resolver performs the type-specific resolution work.
	resolver Resolver
	// kubeClientSet talks to the core Kubernetes API.
	kubeClientSet kubernetes.Interface
	// resolutionRequestLister reads ResolutionRequests from the informer cache.
	resolutionRequestLister rrv1beta1.ResolutionRequestLister
	// resolutionRequestClientSet writes ResolutionRequest status updates.
	resolutionRequestClientSet rrclient.Interface
	// configStore, when non-nil, injects the resolver's ConfigMap data into ctx.
	configStore *framework.ConfigStore
}
// Compile-time check that Reconciler participates in leader election.
var _ reconciler.LeaderAware = &Reconciler{}

// Reconcile receives the string key of a ResolutionRequest object, looks
// it up, checks it for common errors, and then delegates
// resolver-specific functionality to the reconciler's embedded
// type-specific resolver. Any errors that occur during validation or
// resolution are handled by updating or failing the ResolutionRequest.
func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// A malformed key can never succeed on retry, so fail permanently.
		err = &resolutioncommon.InvalidResourceKeyError{Key: key, Original: err}
		return controller.NewPermanentError(err)
	}
	rr, err := r.resolutionRequestLister.ResolutionRequests(namespace).Get(name)
	if err != nil {
		err := &resolutioncommon.GetResourceError{ResolverName: "resolutionrequest", Key: key, Original: err}
		return controller.NewPermanentError(err)
	}
	// Nothing to do for requests that already succeeded or failed.
	if rr.IsDone() {
		return nil
	}
	// Inject request-scoped information into the context, such as
	// the namespace that the request originates from and the
	// configuration from the configmap this resolver is watching.
	ctx = resolutioncommon.InjectRequestNamespace(ctx, namespace)
	ctx = resolutioncommon.InjectRequestName(ctx, name)
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}
	return r.resolve(ctx, key, rr)
}
// resolve validates the request and runs the resolver in a separate
// goroutine while enforcing the resolution timeout. Whichever happens first
// — validation/resolution error, timeout, or a resolved resource — decides
// the outcome.
func (r *Reconciler) resolve(ctx context.Context, key string, rr *v1beta1.ResolutionRequest) error {
	// Buffered (capacity 1) so the resolution goroutine can always complete
	// its single send and exit even after the select below has already
	// returned on timeout; with unbuffered channels that goroutine would
	// block on the send forever and leak.
	errChan := make(chan error, 1)
	resourceChan := make(chan framework.ResolvedResource, 1)
	paramsMap := make(map[string]string)
	for _, p := range rr.Spec.Params {
		paramsMap[p.Name] = p.Value.StringVal
	}
	// Centralized cache parameter validation for all resolvers.
	if cacheMode, exists := paramsMap[rrcache.CacheParam]; exists && cacheMode != "" {
		if err := rrcache.Validate(cacheMode); err != nil {
			return &resolutioncommon.InvalidRequestError{
				ResolutionRequestKey: key,
				Message:              err.Error(),
			}
		}
	}
	// Resolvers implementing TimedResolution may override the default timeout.
	timeoutDuration := defaultMaximumResolutionDuration
	if timed, ok := r.resolver.(framework.TimedResolution); ok {
		var err error
		timeoutDuration, err = timed.GetResolutionTimeout(ctx, defaultMaximumResolutionDuration, paramsMap)
		if err != nil {
			return err
		}
	}
	// A new context is created for resolution so that timeouts can
	// be enforced without affecting other uses of ctx (e.g. sending
	// Updates to ResolutionRequest objects).
	resolutionCtx, cancelFn := context.WithTimeout(ctx, timeoutDuration)
	defer cancelFn()
	go func() {
		validationError := r.resolver.Validate(resolutionCtx, &rr.Spec)
		if validationError != nil {
			errChan <- &resolutioncommon.InvalidRequestError{
				ResolutionRequestKey: key,
				Message:              validationError.Error(),
			}
			return
		}
		resource, resolveErr := r.resolver.Resolve(resolutionCtx, &rr.Spec)
		if resolveErr != nil {
			errChan <- &resolutioncommon.GetResourceError{
				ResolverName: r.resolver.GetName(resolutionCtx),
				Key:          key,
				Original:     resolveErr,
			}
			return
		}
		resourceChan <- resource
	}()
	select {
	case err := <-errChan:
		if err != nil {
			return r.OnError(ctx, rr, err)
		}
	case <-resolutionCtx.Done():
		if err := resolutionCtx.Err(); err != nil {
			return r.OnError(ctx, rr, err)
		}
	case resource := <-resourceChan:
		return r.writeResolvedData(ctx, rr, resource)
	}
	// Defensive fallback: only reachable if a select arm received a nil
	// error or an undone-but-Done context, which should not happen.
	return errors.New("unknown error")
}
// OnError is used to handle any situation where a ResolutionRequest has
// reached a terminal situation that cannot be recovered from. Transient
// errors are returned unchanged so the workqueue retries them; all other
// non-nil errors become permanent, marking the request failed when one is
// available.
func (r *Reconciler) OnError(ctx context.Context, rr *v1beta1.ResolutionRequest, err error) error {
	switch {
	case resolutioncommon.IsErrTransient(err):
		// Let the queue retry transient failures.
		return err
	case rr == nil:
		return controller.NewPermanentError(err)
	case err != nil:
		// Best-effort status update; the permanent error stops requeues.
		_ = r.MarkFailed(ctx, rr, err)
		return controller.NewPermanentError(err)
	default:
		return nil
	}
}
// MarkFailed updates a ResolutionRequest as having failed. It returns
// errors that occur during the update process or nil if the update
// appeared to succeed.
func (r *Reconciler) MarkFailed(ctx context.Context, rr *v1beta1.ResolutionRequest, resolutionErr error) error {
	key := fmt.Sprintf("%s/%s", rr.Namespace, rr.Name)
	// Derive the status reason; resolutionErr is re-assigned to the
	// normalized error returned alongside it.
	reason, resolutionErr := resolutioncommon.ReasonError(resolutionErr)
	// Re-fetch the latest object so the status update is not applied to a
	// stale copy from the informer cache.
	latestGeneration, err := r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Get(ctx, rr.Name, metav1.GetOptions{})
	if err != nil {
		logging.FromContext(ctx).Warnf("error getting latest generation of resolutionrequest %q: %v", key, err)
		return err
	}
	// Already terminal: nothing left to mark.
	if latestGeneration.IsDone() {
		return nil
	}
	latestGeneration.Status.MarkFailed(reason, resolutionErr.Error())
	_, err = r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace).UpdateStatus(ctx, latestGeneration, metav1.UpdateOptions{})
	if err != nil {
		logging.FromContext(ctx).Warnf("error marking resolutionrequest %q as failed: %v", key, err)
		return err
	}
	return nil
}
// writeResolvedData patches the resolved content, annotations, and source
// reference into the ResolutionRequest's status subresource. Failures are
// routed through OnError so the request gets marked failed.
func (r *Reconciler) writeResolvedData(ctx context.Context, rr *v1beta1.ResolutionRequest, resource framework.ResolvedResource) error {
	// The resolved bytes travel base64-encoded inside the status patch.
	encodedData := base64.StdEncoding.Strict().EncodeToString(resource.Data())
	patchBytes, err := json.Marshal(map[string]statusDataPatch{
		"status": {
			Data:        encodedData,
			Annotations: resource.Annotations(),
			RefSource:   resource.RefSource(),
			// Source carries the same value as RefSource, cast to the older
			// ConfigSource type.
			Source: (*pipelinev1beta1.ConfigSource)(resource.RefSource()),
		},
	})
	if err != nil {
		logging.FromContext(ctx).Warnf("writeResolvedData error serializing resource request patch for resolution request %s:%s: %s", rr.Namespace, rr.Name, err.Error())
		return r.OnError(ctx, rr, &resolutioncommon.UpdatingRequestError{
			ResolutionRequestKey: fmt.Sprintf("%s/%s", rr.Namespace, rr.Name),
			Original:             fmt.Errorf("error serializing resource request patch: %w", err),
		})
	}
	// Merge-patch only the status subresource.
	_, err = r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Patch(ctx, rr.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	if err != nil {
		logging.FromContext(ctx).Warnf("writeResolvedData error patching resolution request %s:%s: %s", rr.Namespace, rr.Name, err.Error())
		return r.OnError(ctx, rr, &resolutioncommon.UpdatingRequestError{
			ResolutionRequestKey: fmt.Sprintf("%s/%s", rr.Namespace, rr.Name),
			Original:             err,
		})
	}
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"encoding/base64"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
"github.com/tektoncd/pipeline/test/names"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
testclock "k8s.io/utils/clock/testing"
"knative.dev/pkg/apis"
cminformer "knative.dev/pkg/configmap/informer"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
pkgreconciler "knative.dev/pkg/reconciler"
"knative.dev/pkg/system"
)
// Shared fixtures for resolver-framework reconcile tests.
var (
	// now is a fixed instant injected into reconcilers via the fake clock so
	// time-dependent logic is deterministic across test runs.
	now       = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
	testClock = testclock.NewFakePassiveClock(now)
	// ignoreLastTransitionTime drops condition transition timestamps from
	// status diffs, since they vary run to run.
	ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time")
	// ignoreCacheTimestamp drops resolver-cache annotations (keys prefixed
	// "resolution.tekton.dev/cache") from map diffs.
	ignoreCacheTimestamp = cmpopts.IgnoreMapEntries(func(k, v string) bool {
		return strings.HasPrefix(k, "resolution.tekton.dev/cache")
	})
)
// ResolverReconcileTestModifier is a function that will be invoked after the test assets and controller have been created.
type ResolverReconcileTestModifier = func(resolver framework.Resolver, testAssets test.Assets)
// RunResolverReconcileTest takes data to seed clients and informers, a Resolver, a ResolutionRequest, and the expected
// ResolutionRequestStatus and error, both of which can be nil. It instantiates a controller for that resolver and
// reconciles the given request. It then checks for the expected error, if any, and compares the resulting status with
// the expected status.
func RunResolverReconcileTest(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, request *v1beta1.ResolutionRequest,
	expectedStatus *v1beta1.ResolutionRequestStatus, expectedErr error, resolverModifiers ...ResolverReconcileTestModifier) {
	t.Helper()
	testAssets, cancel := GetResolverFrameworkController(ctx, t, d, resolver, setClockOnReconciler)
	defer cancel()
	// Give callers a chance to adjust the resolver/assets before reconciling.
	for _, rm := range resolverModifiers {
		rm(resolver, testAssets)
	}
	err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRequestName(request)) //nolint
	if expectedErr != nil {
		if err == nil {
			t.Fatalf("expected to get error: `%v`, but got nothing", expectedErr)
		}
		// Compare by message: resolver errors are wrapped, so identity checks
		// would be too strict here.
		if expectedErr.Error() != err.Error() {
			t.Fatalf("expected to get error `%v`, but got `%v`", expectedErr, err)
		}
	} else if err != nil {
		// Requeue keys are how knative schedules a retry, not failures; only
		// unexpected non-requeue errors fail the test.
		if ok, _ := controller.IsRequeueKey(err); !ok {
			t.Fatalf("did not expect an error, but got `%v`", err)
		}
	}
	// Re-fetch the request to observe the status the reconciler wrote.
	c := testAssets.Clients.ResolutionRequests.ResolutionV1beta1()
	reconciledRR, err := c.ResolutionRequests(request.Namespace).Get(testAssets.Ctx, request.Name, metav1.GetOptions{}) //nolint
	if err != nil {
		t.Fatalf("getting updated ResolutionRequest: %v", err)
	}
	if expectedStatus != nil {
		if d := cmp.Diff(*expectedStatus, reconciledRR.Status, ignoreLastTransitionTime, ignoreCacheTimestamp); d != "" {
			t.Errorf("ResolutionRequest status doesn't match %s", diff.PrintWantGot(d))
			// When the base64 Data fields differ, also diff the decoded
			// contents to produce a human-readable failure message.
			if expectedStatus.Data != "" && expectedStatus.Data != reconciledRR.Status.Data {
				decodedExpectedData, err := base64.StdEncoding.Strict().DecodeString(expectedStatus.Data)
				if err != nil {
					t.Errorf("couldn't decode expected data: %v", err)
					return
				}
				decodedGotData, err := base64.StdEncoding.Strict().DecodeString(reconciledRR.Status.Data)
				if err != nil {
					t.Errorf("couldn't decode reconciled data: %v", err)
					return
				}
				if d := cmp.Diff(decodedExpectedData, decodedGotData); d != "" {
					t.Errorf("decoded data did not match expected: %s", diff.PrintWantGot(d))
				}
			}
		}
	}
}
// GetResolverFrameworkController returns an instance of the resolver framework
// controller/reconciler built around the given resolver and seeded with d (the
// pre-existing cluster state the test needs), plus a cancel func for cleanup.
func GetResolverFrameworkController(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, modifiers ...framework.ReconcilerModifier) (test.Assets, func()) {
	t.Helper()
	// Seed the name generator so generated names are stable across runs.
	names.TestingSeed()
	return initializeResolverFrameworkControllerAssets(ctx, t, d, resolver, modifiers...)
}
// initializeResolverFrameworkControllerAssets seeds fake clients/informers from d,
// builds the resolver framework controller around resolver, starts the configmap
// watcher, and promotes the reconciler so it acts as leader. It returns the
// assembled test.Assets and a cancel func that tears the context down.
func initializeResolverFrameworkControllerAssets(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, modifiers ...framework.ReconcilerModifier) (test.Assets, func()) {
	t.Helper()
	ctx, cancel := context.WithCancel(ctx)
	// Guarantee the configmaps the watcher observes exist before seeding.
	ensureConfigurationConfigMapsExist(&d)
	c, informers := test.SeedTestData(t, ctx, d)
	configMapWatcher := cminformer.NewInformedWatcher(c.Kube, resolverconfig.ResolversNamespace(system.Namespace()))
	ctl := framework.NewController(ctx, resolver, modifiers...)(ctx, configMapWatcher)
	if err := configMapWatcher.Start(ctx.Done()); err != nil {
		t.Fatalf("error starting configmap watcher: %v", err)
	}
	// Promote to leader so the reconciler actually processes work in tests.
	if la, ok := ctl.Reconciler.(pkgreconciler.LeaderAware); ok {
		_ = la.Promote(pkgreconciler.UniversalBucket(), func(pkgreconciler.Bucket, types.NamespacedName) {})
	}
	return test.Assets{
		Logger:     logging.FromContext(ctx),
		Controller: ctl,
		Clients:    c,
		Informers:  informers,
		Recorder:   controller.GetEventRecorder(ctx).(*record.FakeRecorder),
		Ctx:        ctx,
	}, cancel
}
// getRequestName renders the "namespace/name" key for a ResolutionRequest,
// matching the key format Reconcile expects.
func getRequestName(rr *v1beta1.ResolutionRequest) string {
	return rr.Namespace + "/" + rr.Name
}
// setClockOnReconciler injects the shared fake clock into a reconciler that
// does not already have one, keeping time-based behavior deterministic.
func setClockOnReconciler(r *framework.Reconciler) {
	if r.Clock != nil {
		return
	}
	r.Clock = testClock
}
// ensureConfigurationConfigMapsExist appends empty feature-flags and
// resolver-cache configmaps to d unless the test data already provides them,
// so the configmap watcher always has something to observe.
func ensureConfigurationConfigMapsExist(d *test.Data) {
	const cacheConfigName = "resolver-cache-config"
	present := map[string]bool{}
	for _, cm := range d.ConfigMaps {
		present[cm.Name] = true
	}
	// emptyConfigMap builds a data-less configmap in the resolvers namespace.
	emptyConfigMap := func(name string) *corev1.ConfigMap {
		return &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
			},
			Data: map[string]string{},
		}
	}
	if !present[resolverconfig.GetFeatureFlagsConfigName()] {
		d.ConfigMaps = append(d.ConfigMaps, emptyConfigMap(resolverconfig.GetFeatureFlagsConfigName()))
	}
	if !present[cacheConfigName] {
		d.ConfigMaps = append(d.ConfigMaps, emptyConfigMap(cacheConfigName))
	}
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"context"
"errors"
"time"
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/go-scm/scm/factory"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/git"
"go.uber.org/zap"
k8scache "k8s.io/apimachinery/pkg/util/cache"
"k8s.io/client-go/kubernetes"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/logging"
)
const (
	// disabledError is the message returned when the enable-git-resolver
	// feature flag is not set to "true".
	disabledError   = "cannot handle resolution request, enable-git-resolver feature flag not true"
	// gitResolverName is the name used for this resolver's logger.
	gitResolverName = "Git"
	// labelValueGitResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	labelValueGitResolverType = "git"
	// size of the LRU secrets cache
	cacheSize = 1024
	// the time to live for a cache entry
	ttl = 5 * time.Minute
	// git revision parameter name
	revisionParam = "revision"
)
// Compile-time checks that Resolver implements the interfaces the resolver
// framework and cache layer rely on.
var _ framework.Resolver = (*Resolver)(nil)
var _ resolutionframework.ConfigWatcher = (*Resolver)(nil)
var _ cache.ImmutabilityChecker = (*Resolver)(nil)
var _ resolutionframework.TimedResolution = (*Resolver)(nil)
// Resolver implements a framework.Resolver that can fetch files from git.
type Resolver struct {
	// kubeClient is the cluster client handed to GitResolver for fetches.
	kubeClient kubernetes.Interface
	// logger is the named logger set up in Initialize.
	logger *zap.SugaredLogger
	// cache is an LRU cache with per-entry expiry (see cacheSize/ttl);
	// defaulted in Initialize when nil.
	cache *k8scache.LRUExpireCache
	// ttl is the time-to-live applied to cache entries; defaulted in Initialize.
	ttl time.Duration
	// Function for creating a SCM client so we can change it in tests.
	clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)
}
// Initialize performs the git resolver's setup: it captures the in-cluster
// kube client and a named logger from ctx, then fills in defaults for the
// SCM client factory, secrets cache, and cache TTL wherever they are unset
// (tests may have pre-populated them).
func (r *Resolver) Initialize(ctx context.Context) error {
	r.kubeClient = kubeclient.Get(ctx)
	r.logger = logging.FromContext(ctx).Named(gitResolverName)
	if r.clientFunc == nil {
		r.clientFunc = factory.NewClient
	}
	if r.cache == nil {
		r.cache = k8scache.NewLRUExpireCache(cacheSize)
	}
	if r.ttl == 0 {
		r.ttl = ttl
	}
	return nil
}
// GetName returns the string name ("Git") that the git resolver should be
// associated with.
func (r *Resolver) GetName(_ context.Context) string {
	return gitResolverName
}
// GetSelector returns the label selector that routes resource requests to the
// gitresolver: resolution.tekton.dev/type must equal "git".
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
	return map[string]string{resolutioncommon.LabelKeyResolverType: labelValueGitResolverType}
}
// GetConfigName returns the name of the git resolver's configmap.
func (r *Resolver) GetConfigName(_ context.Context) string {
	return git.ConfigMapName
}
// Validate returns an error if the given request's parameters are not valid
// for a resource request targeting the gitresolver.
func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
	return git.ValidateParams(ctx, req.Params)
}
// IsImmutable implements cache.ImmutabilityChecker. It reports whether the
// request pins an exact git commit: the revision param must be a full
// 40-character hexadecimal SHA. Branch/tag names, and anything else that can
// move, are considered mutable and return false.
func (r *Resolver) IsImmutable(params []v1.Param) bool {
	revision := ""
	for _, p := range params {
		if p.Name == revisionParam {
			revision = p.Value.StringVal
			break
		}
	}
	// A full git commit SHA-1 is exactly 40 hex digits.
	if len(revision) != 40 {
		return false
	}
	for _, c := range revision {
		switch {
		case c >= '0' && c <= '9', c >= 'a' && c <= 'f', c >= 'A' && c <= 'F':
			// hex digit — keep scanning
		default:
			return false
		}
	}
	return true
}
// GetResolutionTimeout returns the timeout configured in the SCM config
// selected by params, falling back to defaultTimeout when the config does not
// specify one. Errors come from config lookup or duration parsing.
func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
	conf, err := git.GetScmConfigForParamConfigKey(ctx, params)
	if err != nil {
		return 0, err
	}
	if conf.Timeout == "" {
		return defaultTimeout, nil
	}
	// ParseDuration returns (0, err) on failure, matching the error contract.
	return time.ParseDuration(conf.Timeout)
}
// Resolve performs the work of fetching a file from git given the request's
// parameters, consulting the shared resolver cache when it is enabled for
// this request.
func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
	switch {
	case len(req.Params) == 0:
		return nil, errors.New("no params")
	case git.IsDisabled(ctx):
		return nil, errors.New(disabledError)
	}
	params, err := git.PopulateDefaultParams(ctx, req.Params)
	if err != nil {
		return nil, err
	}
	doResolve := func() (resolutionframework.ResolvedResource, error) {
		return r.resolveViaGit(ctx, params)
	}
	if !cache.ShouldUse(ctx, r, req.Params, labelValueGitResolverType) {
		return doResolve()
	}
	return cache.GetFromCacheOrResolve(ctx, req.Params, labelValueGitResolverType, doResolve)
}
// resolveViaGit dispatches to the appropriate fetch strategy: a git clone when
// a repo URL param is present, otherwise the SCM provider API.
func (r *Resolver) resolveViaGit(ctx context.Context, params map[string]string) (resolutionframework.ResolvedResource, error) {
	gr := &git.GitResolver{
		KubeClient: r.kubeClient,
		Logger:     r.logger,
		Cache:      r.cache,
		TTL:        r.ttl,
		Params:     params,
	}
	if params[git.UrlParam] == "" {
		return gr.ResolveAPIGit(ctx, r.clientFunc)
	}
	return gr.ResolveGitClone(ctx)
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package http
import (
"context"
"errors"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/resolution/common"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/http"
"go.uber.org/zap"
"k8s.io/client-go/kubernetes"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/logging"
)
const (
	// LabelValueHttpResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueHttpResolverType = "http"
	// disabledError is the message returned when the enable-http-resolver
	// feature flag is not set to "true".
	disabledError    = "cannot handle resolution request, enable-http-resolver feature flag not true"
	// httpResolverName is the name this resolver reports via GetName.
	httpResolverName = "Http"
	// configMapName is the configmap holding the http resolver's configuration.
	configMapName = "http-resolver-config"
	// defaultHttpTimeoutValue is the default HTTP timeout as a Go duration string.
	defaultHttpTimeoutValue   = "1m"
	defaultBasicAuthSecretKey = "password" // default key in the HTTP password secret
)
// Compile-time checks that Resolver satisfies the resolver framework interfaces.
var _ framework.Resolver = (*Resolver)(nil)
var _ resolutionframework.ConfigWatcher = (*Resolver)(nil)

// Resolver implements a framework.Resolver that can fetch files from an HTTP URL
type Resolver struct {
	// kubeClient is passed to FetchHttpResource (presumably for reading
	// auth secrets — confirm against the http package).
	kubeClient kubernetes.Interface
	// logger is taken from the context in Initialize.
	logger *zap.SugaredLogger
}
// Initialize captures the kube client and logger from ctx; the http resolver
// needs no other setup.
func (r *Resolver) Initialize(ctx context.Context) error {
	r.logger = logging.FromContext(ctx)
	r.kubeClient = kubeclient.Get(ctx)
	return nil
}
// GetName returns the string name ("Http") to refer to this resolver by.
func (r *Resolver) GetName(_ context.Context) string {
	return httpResolverName
}
// GetConfigName returns the name of the http resolver's configmap.
func (r *Resolver) GetConfigName(_ context.Context) string {
	return configMapName
}
// GetSelector returns the label selector that routes resource requests to this
// resolver: resolution.tekton.dev/type must equal "http".
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
	return map[string]string{common.LabelKeyResolverType: LabelValueHttpResolverType}
}
// Validate ensures the request's parameters are well-formed for the http resolver.
func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
	return http.ValidateParams(ctx, req.Params)
}
// Resolve fetches the requested file over HTTP using the request's params.
// It fails fast when the http resolver feature flag is disabled.
func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
	if http.IsDisabled(ctx) {
		return nil, errors.New(disabledError)
	}
	params, err := http.PopulateDefaultParams(ctx, req.Params)
	if err != nil {
		return nil, err
	}
	return http.FetchHttpResource(ctx, params, r.kubeClient, r.logger)
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hub
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/resolution/common"
resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/hub"
)
const (
	// LabelValueHubResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueHubResolverType = "hub"
	// hubResolverName is the name this resolver reports via GetName.
	hubResolverName = "Hub"
	// configMapName is the configmap holding the hub resolver's configuration.
	configMapName = "hubresolver-config"
	// ArtifactHubType is the value to use setting the type field to artifact
	ArtifactHubType = "artifact"
	// TektonHubType is the value to use setting the type field to tekton
	TektonHubType = "tekton"
)
// Compile-time checks that Resolver satisfies the resolver framework interfaces.
var _ framework.Resolver = (*Resolver)(nil)
var _ resolutionframework.ConfigWatcher = (*Resolver)(nil)

// Resolver implements a framework.Resolver that can fetch resources from the
// Tekton Hub or Artifact Hub.
type Resolver struct {
	// TektonHubURL is the URL for hub resolver with type tekton
	TektonHubURL string
	// ArtifactHubURL is the URL for hub resolver with type artifact
	ArtifactHubURL string
}
// Initialize sets up any dependencies needed by the resolver. The hub
// resolver currently has none.
func (r *Resolver) Initialize(_ context.Context) error {
	return nil
}
// GetName returns the string name ("Hub") to refer to this resolver by.
func (r *Resolver) GetName(_ context.Context) string {
	return hubResolverName
}
// GetConfigName returns the name of the hub resolver's configmap
// ("hubresolver-config").
func (r *Resolver) GetConfigName(_ context.Context) string {
	return configMapName
}
// GetSelector returns the label selector that routes resource requests to this
// resolver: resolution.tekton.dev/type must equal "hub".
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
	return map[string]string{common.LabelKeyResolverType: LabelValueHubResolverType}
}
// Validate ensures the request's parameters are well-formed for the hub resolver.
func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
	return hub.ValidateParams(ctx, req.Params, r.TektonHubURL)
}
// Resolve fetches the requested resource from the Tekton Hub or Artifact Hub
// using the request's params.
func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
	return hub.Resolve(ctx, req.Params, r.TektonHubURL, r.ArtifactHubURL)
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"context"
"errors"
rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
rrlisters "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
resolutionresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
)
// CRDRequester implements the Requester interface using
// ResolutionRequest CRDs.
type CRDRequester struct {
	// clientset creates ResolutionRequest objects in the cluster.
	clientset rrclient.Interface
	// lister reads ResolutionRequests from the shared informer cache; note
	// the cache can lag behind writes (see Submit).
	lister rrlisters.ResolutionRequestLister
}
// NewCRDRequester returns a Requester that mediates between the caller who
// wants a resource (e.g. Tekton Pipelines) and the responder who can fetch it
// (e.g. the gitresolver) via ResolutionRequest CRD objects.
func NewCRDRequester(clientset rrclient.Interface, lister rrlisters.ResolutionRequestLister) *CRDRequester {
	return &CRDRequester{clientset: clientset, lister: lister}
}

// Compile-time check that CRDRequester satisfies Requester.
var _ Requester = &CRDRequester{}
// Submit constructs a ResolutionRequest object and submits it to the
// kubernetes cluster, returning any errors experienced while doing so.
// If ResolutionRequest is succeeded then it returns the resolved data.
// Until then it returns ErrRequestInProgress, so callers are expected to
// re-invoke Submit on subsequent reconciles.
func (r *CRDRequester) Submit(ctx context.Context, resolver ResolverName, req Request) (ResolvedResource, error) {
	// Lookup error is deliberately ignored: a nil rr simply means the request
	// does not exist yet (or the informer cache hasn't caught up).
	rr, _ := r.lister.ResolutionRequests(req.ResolverPayload().Namespace).Get(req.ResolverPayload().Name)
	if rr == nil {
		if err := r.createResolutionRequest(ctx, resolver, req); err != nil &&
			// When the request reconciles frequently, the creation may fail
			// because the list informer cache is not updated.
			// If the request already exists then we can assume that is in progress.
			// The next reconcile will handle it based on the actual situation.
			!apierrors.IsAlreadyExists(err) {
			return nil, err
		}
		return nil, resolutioncommon.ErrRequestInProgress
	}
	if rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() {
		// TODO(sbwsg): This should be where an existing
		// resource is given an additional owner reference so
		// that it doesn't get deleted until the caller is done
		// with it. Use appendOwnerReference and then submit
		// update to ResolutionRequest.
		return nil, resolutioncommon.ErrRequestInProgress
	}
	if rr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() {
		// Resolution succeeded: hand the resolved data back to the caller.
		return resolutionresource.CrdIntoResource(rr), nil
	}
	// Condition is False: resolution failed — surface the condition message.
	message := rr.Status.GetCondition(apis.ConditionSucceeded).GetMessage()
	err := resolutioncommon.NewError(resolutioncommon.ReasonResolutionFailed, errors.New(message))
	return nil, err
}
// createResolutionRequest builds a ResolutionRequest CRD for req — owned by
// the requesting object when req exposes an owner reference — and creates it
// in the cluster.
func (r *CRDRequester) createResolutionRequest(ctx context.Context, resolver ResolverName, req Request) error {
	var owner metav1.OwnerReference
	if ownedReq, ok := req.(OwnedRequest); ok {
		owner = ownedReq.OwnerRef()
	}
	payload := req.ResolverPayload()
	rr := resolutionresource.CreateResolutionRequest(ctx, resolver, payload.Name, payload.Namespace, payload.ResolutionSpec.Params, owner)
	rr.Spec.URL = payload.ResolutionSpec.URL
	_, err := r.clientset.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Create(ctx, rr, metav1.CreateOptions{})
	return err
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"context"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
)
// BasicRequest is a minimal Request implementation that simply carries the
// ResolverPayload it was constructed with.
type BasicRequest struct {
	resolverPayload ResolverPayload
}

// Compile-time check that BasicRequest satisfies the Request interface.
// (The original declared this assertion twice; once is sufficient.)
var _ Request = &BasicRequest{}

// NewRequest returns a BasicRequest carrying the given resolverPayload.
func NewRequest(resolverPayload ResolverPayload) Request {
	return &BasicRequest{resolverPayload}
}
// ResolverPayload returns the payload (name, namespace, and resolution spec)
// this request was constructed with.
func (req *BasicRequest) ResolverPayload() ResolverPayload {
	return req.resolverPayload
}
// Requester is the interface implemented by a type that knows how to
// submit requests for remote resources.
type Requester interface {
	// Submit accepts the name of a resolver to submit a request to
	// along with the request itself. It returns the resolved resource on
	// success; until resolution completes it may return an in-progress error.
	Submit(ctx context.Context, name ResolverName, req Request) (ResolvedResource, error)
}
// Request is implemented by any type that represents a single request
// for a remote resource. Implementing this interface gives the underlying
// type an opportunity to control properties such as whether the name of
// a request has particular properties, whether the request should be made
// to a specific namespace, and precisely which parameters should be included.
type Request interface {
	// ResolverPayload returns everything needed to build the ResolutionRequest.
	ResolverPayload() ResolverPayload
}
// ResolverPayload is the struct which holds the payload to create
// the Resolution Request CRD.
type ResolverPayload struct {
	// Name of the ResolutionRequest object to create.
	Name string
	// Namespace the ResolutionRequest is created in.
	Namespace string
	// ResolutionSpec carries the resolver params and URL for the request.
	ResolutionSpec *v1beta1.ResolutionRequestSpec
}
// ResolutionRequester is the interface implemented by a type that knows how to
// submit requests for remote resources.
type ResolutionRequester interface {
	// SubmitResolutionRequest accepts the name of a resolver to submit a request to
	// along with the request itself. It returns the resolved resource on success.
	SubmitResolutionRequest(ctx context.Context, name ResolverName, req RequestRemoteResource) (ResolvedResource, error)
}
// RequestRemoteResource is implemented by any type that represents a single request
// for a remote resource. Implementing this interface gives the underlying
// type an opportunity to control properties such as whether the name of
// a request has particular properties, whether the request should be made
// to a specific namespace, and precisely which parameters should be included.
type RequestRemoteResource interface {
	// ResolverPayload returns everything needed to build the ResolutionRequest.
	ResolverPayload() ResolverPayload
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import "context"
// contextKey is a unique type to map common request-scoped
// context information.
type contextKey struct{}
// requestNamespaceContextKey is the key stored in a context alongside
// the string namespace of a resolution request.
var requestNamespaceContextKey = contextKey{}
// InjectRequestNamespace returns a new context with a request-scoped
// namespace. This value may only be set once per request; subsequent
// calls with the same context or a derived context will be ignored.
func InjectRequestNamespace(ctx context.Context, namespace string) context.Context {
// Once set don't allow the value to be overwritten.
if val := ctx.Value(requestNamespaceContextKey); val != nil {
return ctx
}
return context.WithValue(ctx, requestNamespaceContextKey, namespace)
}
// RequestNamespace returns the namespace of the resolution request
// currently being processed or an empty string if the request somehow
// does not originate from a namespaced location.
func RequestNamespace(ctx context.Context) string {
if val := ctx.Value(requestNamespaceContextKey); val != nil {
if str, ok := val.(string); ok {
return str
}
}
return ""
}
// requestNameContextKey is the key stored in a context alongside
// the string name of a resolution request.
var requestNameContextKey = contextKey{}
// InjectRequestName returns a new context with a request-scoped
// name. This value may only be set once per request; subsequent
// calls with the same context or a derived context will be ignored.
func InjectRequestName(ctx context.Context, name string) context.Context {
// Once set don't allow the value to be overwritten.
if val := ctx.Value(requestNameContextKey); val != nil {
return ctx
}
return context.WithValue(ctx, requestNameContextKey, name)
}
// RequestName returns the name of the resolution request
// currently being processed or an empty string if none were registered.
func RequestName(ctx context.Context) string {
if val := ctx.Value(requestNameContextKey); val != nil {
if str, ok := val.(string); ok {
return str
}
}
return ""
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"context"
"errors"
"fmt"
"slices"
"strings"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// errEtcdLeaderChange is the raw message etcd returns when its leader changes
// mid-request; IsErrTransient matches it by substring. This error is defined in etcd at
// https://github.com/etcd-io/etcd/blob/5b226e0abf4100253c94bb71f47d6815877ed5a2/server/etcdserver/errors.go#L30
// TODO: If/when https://github.com/kubernetes/kubernetes/issues/106491 is addressed,
// we should stop relying on a hardcoded string.
var errEtcdLeaderChange = "etcdserver: leader changed"
// Error embeds both a short machine-readable string reason for resolution
// problems alongside the original error generated during the resolution flow.
type Error struct {
Reason string
Original error
}
var _ error = &Error{}
// Error returns the original error's message. This is intended to meet the error.Error interface.
func (e *Error) Error() string {
return e.Original.Error()
}
// Unwrap returns the original error without the Reason annotation. This is
// intended to support usage of errors.Is and errors.As with Errors.
func (e *Error) Unwrap() error {
return e.Original
}
// NewError returns a Error with the given reason and underlying
// original error.
func NewError(reason string, err error) *Error {
return &Error{
Reason: reason,
Original: err,
}
}
// ErrRequestInProgress is a sentinel value to indicate that
// a resource request is still in progress. Compare with errors.Is.
var ErrRequestInProgress = NewError("RequestInProgress", errors.New("Resource request is still in-progress"))
// InvalidResourceKeyError indicates that a string key given to the
// Reconcile function does not match the expected "name" or "namespace/name"
// format.
type InvalidResourceKeyError struct {
Key string
Original error
}
var _ error = &InvalidResourceKeyError{}
func (e *InvalidResourceKeyError) Error() string {
return fmt.Sprintf("invalid resource key %q: %v", e.Key, e.Original)
}
func (e *InvalidResourceKeyError) Unwrap() error {
return e.Original
}
// InvalidRequestError is returned when a resource request is badly formed for
// some reason: either the parameters don't match the resolver's expectations
// or there is some other structural issue.
type InvalidRequestError struct {
	ResolutionRequestKey string
	Message              string
}

var _ error = &InvalidRequestError{}

// Error renders the request key alongside the human-readable message.
func (e *InvalidRequestError) Error() string {
	return fmt.Sprintf("invalid resource request %q: %s", e.ResolutionRequestKey, e.Message)
}
// GetResourceError is an error received during what should
// otherwise have been a successful resource request.
type GetResourceError struct {
ResolverName string
Key string
Original error
}
var _ error = &GetResourceError{}
func (e *GetResourceError) Error() string {
return fmt.Sprintf("error getting %q %q: %v", e.ResolverName, e.Key, e.Original)
}
func (e *GetResourceError) Unwrap() error {
return e.Original
}
// UpdatingRequestError is an error during any part of the update
// process for a ResolutionRequest, e.g. when attempting to patch the
// ResolutionRequest with resolved data.
type UpdatingRequestError struct {
ResolutionRequestKey string
Original error
}
var _ error = &UpdatingRequestError{}
func (e *UpdatingRequestError) Error() string {
return fmt.Sprintf("error updating resource request %q with data: %v", e.ResolutionRequestKey, e.Original)
}
func (e *UpdatingRequestError) Unwrap() error {
return e.Original
}
// ReasonError extracts the reason and underlying error
// embedded in a given error or returns some sane defaults
// if the error isn't a common.Error.
func ReasonError(err error) (string, error) {
reason := ReasonResolutionFailed
resolutionError := err
var e *Error
if errors.As(err, &e) {
reason = e.Reason
resolutionError = e.Unwrap()
}
return reason, resolutionError
}
// IsErrTransient returns true if an error returned by GetTask/GetStepAction is
// retryable: an apiserver conflict/timeout/throttling response, a retryable
// validation error, an etcd leader change, or a context deadline message.
func IsErrTransient(err error) bool {
	if apierrors.IsConflict(err) || apierrors.IsServerTimeout(err) || apierrors.IsTimeout(err) ||
		apierrors.IsTooManyRequests(err) || errors.Is(err, apiserver.ErrCouldntValidateObjectRetryable) {
		return true
	}
	// Fall back to substring matching for errors that only surface as text.
	msg := err.Error()
	for _, fragment := range []string{errEtcdLeaderChange, context.DeadlineExceeded.Error()} {
		if strings.Contains(msg, fragment) {
			return true
		}
	}
	return false
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bundle
import (
"archive/tar"
"context"
"errors"
"fmt"
"io"
"strings"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
const (
	// MaximumBundleObjects defines the maximum number of objects in a bundle
	// (i.e. the maximum number of layers a bundle image's manifest may have;
	// enforced by checkImageCompliance).
	MaximumBundleObjects = 20
)
// RequestOptions are the options used to request a resource from
// a remote bundle.
type RequestOptions struct {
	// ServiceAccount is the service account used when building the image
	// pull keychain for the bundle request.
	ServiceAccount string
	// ImagePullSecret is the name of a secret used to pull the bundle image.
	ImagePullSecret string
	// Bundle is the OCI image reference of the bundle.
	Bundle string
	// EntryName is the name of the entry (layer) to extract from the bundle.
	EntryName string
	// Kind is the resource kind of the entry; matched case-insensitively
	// against the layer's kind annotation in GetEntry.
	Kind string
	// Cache controls caching behavior for the request; defaults to "auto"
	// when not supplied (see OptionsFromParams).
	Cache string
}
// ResolvedResource wraps the content of a matched entry in a bundle.
type ResolvedResource struct {
	// data is the raw bytes of the matched entry.
	data []byte
	// annotations carries resolver annotations derived from the entry's
	// layer annotations (kind, name, apiVersion).
	annotations map[string]string
	// source records where the data came from (bundle URI, digest, entry).
	source *pipelinev1.RefSource
}

var _ framework.ResolvedResource = &ResolvedResource{}

// Data returns the bytes of the resource fetched from the bundle.
func (br *ResolvedResource) Data() []byte {
	return br.data
}

// Annotations returns the annotations from the bundle that are relevant
// to resolution.
func (br *ResolvedResource) Annotations() map[string]string {
	return br.annotations
}

// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint.
func (br *ResolvedResource) RefSource() *pipelinev1.RefSource {
	return br.source
}
// GetEntry accepts a keychain and options for the request and returns
// either a successfully resolved bundle entry or an error.
//
// It pulls the bundle image, validates it against the Tekton Bundle spec,
// and then scans the manifest's layers for one whose kind/name annotations
// match the requested entry.
func GetEntry(ctx context.Context, keychain authn.Keychain, opts RequestOptions) (*ResolvedResource, error) {
	uri, img, err := retrieveImage(ctx, keychain, opts.Bundle)
	if err != nil {
		return nil, fmt.Errorf("cannot retrieve the oci image: %w", err)
	}
	h, err := img.Digest()
	if err != nil {
		return nil, fmt.Errorf("cannot get the oci digest: %w", err)
	}
	manifest, err := img.Manifest()
	if err != nil {
		return nil, fmt.Errorf("could not parse image manifest: %w", err)
	}
	if err := checkImageCompliance(manifest); err != nil {
		return nil, fmt.Errorf("invalid tekton bundle %s, error: %w", opts.Bundle, err)
	}
	layers, err := img.Layers()
	if err != nil {
		return nil, fmt.Errorf("could not read image layers: %w", err)
	}
	// Index layers by digest so manifest entries can be matched to content.
	layerMap := map[string]v1.Layer{}
	for _, l := range layers {
		digest, err := l.Digest()
		if err != nil {
			return nil, fmt.Errorf("failed to find digest for layer: %w", err)
		}
		layerMap[digest.String()] = l
	}
	for idx, l := range manifest.Layers {
		lKind := l.Annotations[BundleAnnotationKind]
		lName := l.Annotations[BundleAnnotationName]
		// Kind matching is case-insensitive; EqualFold avoids allocating two
		// lowercased copies (staticcheck SA6005).
		if strings.EqualFold(opts.Kind, lKind) && opts.EntryName == lName {
			obj, err := readTarLayer(layerMap[l.Digest.String()])
			if err != nil {
				// This could still be a raw layer so try to read it as that instead.
				obj, _ = readRawLayer(layers[idx])
			}
			return &ResolvedResource{
				data: obj,
				annotations: map[string]string{
					ResolverAnnotationKind:       lKind,
					ResolverAnnotationName:       lName,
					ResolverAnnotationAPIVersion: l.Annotations[BundleAnnotationAPIVersion],
				},
				source: &pipelinev1.RefSource{
					URI: uri,
					Digest: map[string]string{
						h.Algorithm: h.Hex,
					},
					EntryPoint: opts.EntryName,
				},
			}, nil
		}
	}
	return nil, fmt.Errorf("could not find object in image with kind: %s and name: %s", opts.Kind, opts.EntryName)
}
// retrieveImage will fetch the image's url, contents and manifest.
func retrieveImage(ctx context.Context, keychain authn.Keychain, ref string) (string, v1.Image, error) {
	imgRef, err := name.ParseReference(ref)
	if err != nil {
		return "", nil, fmt.Errorf("%s is an unparseable image reference: %w", ref, err)
	}
	fetchOpts := []remote.Option{remote.WithAuthFromKeychain(keychain), remote.WithContext(ctx)}
	// A custom retry backoff is only applied when the resolver config yields
	// a valid one; otherwise the library default is used.
	if customRetryBackoff, backoffErr := GetBundleResolverBackoff(ctx); backoffErr == nil {
		fetchOpts = append(fetchOpts, remote.WithRetryBackoff(customRetryBackoff))
	}
	img, err := remote.Image(imgRef, fetchOpts...)
	return imgRef.Context().Name(), img, err
}
// checkImageCompliance will perform common checks to ensure the Tekton Bundle is compliant to our spec.
func checkImageCompliance(manifest *v1.Manifest) error {
	// Check the manifest's layers to ensure there are no more than
	// MaximumBundleObjects of them. (A stale comment here previously said 10.)
	if len(manifest.Layers) > MaximumBundleObjects {
		return fmt.Errorf("contained more than the maximum %d allowed objects", MaximumBundleObjects)
	}
	// Ensure each layer complies to the spec: it must carry apiVersion, name,
	// and kind annotations, and the kind must be lowercase and singular.
	for i, l := range manifest.Layers {
		if _, ok := l.Annotations[BundleAnnotationAPIVersion]; !ok {
			return fmt.Errorf("the layer %v does not contain a %s annotation", i, BundleAnnotationAPIVersion)
		}
		if _, ok := l.Annotations[BundleAnnotationName]; !ok {
			return fmt.Errorf("the layer %v does not contain a %s annotation", i, BundleAnnotationName)
		}
		kind, ok := l.Annotations[BundleAnnotationKind]
		if !ok {
			return fmt.Errorf("the layer %v does not contain a %s annotation", i, BundleAnnotationKind)
		}
		// Rejects any kind that is not already lowercase and singular: if
		// lowercasing or trimming a trailing "s" changes it, it was invalid.
		if strings.TrimSuffix(strings.ToLower(kind), "s") != kind {
			return fmt.Errorf("the layer %v the annotation %s must be lowercased and singular, found %s", i, BundleAnnotationKind, kind)
		}
	}
	return nil
}
// readTarLayer reads out the contents of an image layer, assumed to be a
// tarball, as bytes.
func readTarLayer(layer v1.Layer) ([]byte, error) {
	reader, err := layer.Uncompressed()
	if err != nil {
		return nil, fmt.Errorf("failed to read image layer: %w", err)
	}
	defer func() {
		_ = reader.Close()
	}()
	// If the user bundled this up as a tar file then we need to untar it.
	tarReader := tar.NewReader(reader)
	header, err := tarReader.Next()
	if err != nil {
		return nil, errors.New("layer is not a tarball")
	}
	// We only allow 1 resource per layer so this tar bundle should have one
	// and only one file.
	contents := make([]byte, header.Size)
	switch _, readErr := io.ReadFull(tarReader, contents); readErr {
	case nil, io.EOF:
		return contents, nil
	default:
		return nil, fmt.Errorf("failed to read tar bundle: %w", readErr)
	}
}
// readRawLayer reads out the contents of an image layer, assumed to be raw
// bytes, as bytes.
func readRawLayer(layer v1.Layer) ([]byte, error) {
	reader, err := layer.Uncompressed()
	if err != nil {
		return nil, fmt.Errorf("failed to read image layer: %w", err)
	}
	defer func() {
		_ = reader.Close()
	}()
	contents, readErr := io.ReadAll(reader)
	if readErr != nil {
		return nil, fmt.Errorf("could not read contents of image layer: %w", readErr)
	}
	return contents, nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bundle
import (
"context"
"fmt"
"strconv"
"time"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
const (
	// ConfigServiceAccount is the configuration field name for controlling
	// the Service Account name to use for bundle requests.
	ConfigServiceAccount = "default-service-account"
	// ConfigKind is the configuration field name for controlling
	// what the layer name in the bundle image is.
	ConfigKind = "default-kind"
	// ConfigTimeoutKey is the configuration field name for controlling
	// the maximum duration of a resolution request for a file from registry.
	ConfigTimeoutKey = "fetch-timeout"
	// ConfigBackoffDuration is the configuration field name for controlling
	// the initial duration of a backoff when a bundle resolution fails
	ConfigBackoffDuration = "backoff-duration"
	// DefaultBackoffDuration is the initial backoff duration used when
	// ConfigBackoffDuration is not set.
	DefaultBackoffDuration = 2.0 * time.Second
	// ConfigBackoffFactor is the configuration field name for controlling
	// the factor by which successive backoffs will increase when a bundle
	// resolution fails
	ConfigBackoffFactor = "backoff-factor"
	// DefaultBackoffFactor is the backoff factor used when
	// ConfigBackoffFactor is not set.
	DefaultBackoffFactor = 2.0
	// ConfigBackoffJitter is the configuration field name for controlling
	// the randomness applied to backoff durations when a bundle resolution fails
	ConfigBackoffJitter = "backoff-jitter"
	// DefaultBackoffJitter is the backoff jitter used when
	// ConfigBackoffJitter is not set.
	DefaultBackoffJitter = 0.1
	// ConfigBackoffSteps is the configuration field name for controlling
	// the number of attempted backoffs to retry when a bundle resolution fails
	ConfigBackoffSteps = "backoff-steps"
	// DefaultBackoffSteps is the backoff step count used when
	// ConfigBackoffSteps is not set.
	DefaultBackoffSteps = 2
	// ConfigBackoffCap is the configuration field name for controlling
	// the maximum duration to try when backing off
	ConfigBackoffCap = "backoff-cap"
	// DefaultBackoffCap is the backoff cap used when ConfigBackoffCap is
	// not set.
	DefaultBackoffCap = 10 * time.Second
)
// GetBundleResolverBackoff returns a remote.Backoff to
// be passed when resolving remote images. This can be configured with the
// backoff-duration, backoff-factor, backoff-jitter, backoff-steps, and backoff-cap
// fields in the bundle-resolver-config ConfigMap.
//
// Each field falls back to its Default* constant when absent from the config.
// On a parse failure the defaults built so far are returned together with a
// non-nil error describing the bad field.
func GetBundleResolverBackoff(ctx context.Context) (remote.Backoff, error) {
	conf := framework.GetResolverConfigFromContext(ctx)
	customRetryBackoff := remote.Backoff{
		Duration: DefaultBackoffDuration,
		Factor:   DefaultBackoffFactor,
		Jitter:   DefaultBackoffJitter,
		Steps:    DefaultBackoffSteps,
		Cap:      DefaultBackoffCap,
	}
	if v, ok := conf[ConfigBackoffDuration]; ok {
		duration, err := time.ParseDuration(v)
		if err != nil {
			return customRetryBackoff, fmt.Errorf("error parsing backoff duration value %s: %w", v, err)
		}
		customRetryBackoff.Duration = duration
	}
	if v, ok := conf[ConfigBackoffFactor]; ok {
		factor, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return customRetryBackoff, fmt.Errorf("error parsing backoff factor value %s: %w", v, err)
		}
		customRetryBackoff.Factor = factor
	}
	if v, ok := conf[ConfigBackoffJitter]; ok {
		jitter, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return customRetryBackoff, fmt.Errorf("error parsing backoff jitter value %s: %w", v, err)
		}
		customRetryBackoff.Jitter = jitter
	}
	if v, ok := conf[ConfigBackoffSteps]; ok {
		steps, err := strconv.Atoi(v)
		if err != nil {
			return customRetryBackoff, fmt.Errorf("error parsing backoff steps value %s: %w", v, err)
		}
		customRetryBackoff.Steps = steps
	}
	if v, ok := conf[ConfigBackoffCap]; ok {
		// Named backoffCap rather than cap to avoid shadowing the builtin.
		backoffCap, err := time.ParseDuration(v)
		if err != nil {
			// Fixed: this message previously said "backoff steps".
			return customRetryBackoff, fmt.Errorf("error parsing backoff cap value %s: %w", v, err)
		}
		customRetryBackoff.Cap = backoffCap
	}
	return customRetryBackoff, nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bundle
import (
"context"
"errors"
"fmt"
"github.com/google/go-containerregistry/pkg/name"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"github.com/tektoncd/pipeline/pkg/resolution/resource"
)
// ParamServiceAccount is the parameter defining what service
// account name to use for bundle requests.
const ParamServiceAccount = "serviceAccount"

// ParamImagePullSecret is the parameter defining what secret
// name to use for bundle requests.
const ParamImagePullSecret = "secret"

// ParamBundle is the parameter defining what the bundle image url is.
const ParamBundle = "bundle"

// ParamName is the parameter defining what the layer name in the bundle
// image is.
const ParamName = resource.ParamName

// ParamKind is the parameter defining what the layer kind in the bundle
// image is.
const ParamKind = "kind"

// paramCache is the parameter defining whether to use cache for bundle requests.
// Unexported: callers set it via the request params, not directly.
const paramCache = "cache"
// OptionsFromParams parses the params from a resolution request and
// converts them into options to pass as part of a bundle request.
func OptionsFromParams(ctx context.Context, params []pipelinev1.Param) (RequestOptions, error) {
	opts := RequestOptions{}
	conf := framework.GetResolverConfigFromContext(ctx)
	paramsMap := map[string]pipelinev1.ParamValue{}
	for _, param := range params {
		paramsMap[param.Name] = param.Value
	}

	// Service account: explicit param wins, then the configured default.
	sa := paramsMap[ParamServiceAccount].StringVal
	if sa == "" {
		confSA, found := conf[ConfigServiceAccount]
		if !found {
			return opts, errors.New("default Service Account was not set during installation of the bundle resolver")
		}
		sa = confSA
	}

	// Bundle reference is required and must parse as an image reference.
	bundle := paramsMap[ParamBundle].StringVal
	if bundle == "" {
		return opts, fmt.Errorf("parameter %q required", ParamBundle)
	}
	if _, err := name.ParseReference(bundle); err != nil {
		return opts, fmt.Errorf("invalid bundle reference: %w", err)
	}

	// Entry name is required.
	entryName := paramsMap[ParamName].StringVal
	if entryName == "" {
		return opts, fmt.Errorf("parameter %q required", ParamName)
	}

	// Kind: explicit param wins, then the configured default.
	kind := paramsMap[ParamKind].StringVal
	if kind == "" {
		confKind, found := conf[ConfigKind]
		if !found {
			return opts, errors.New("default resource Kind was not set during installation of the bundle resolver")
		}
		kind = confKind
	}

	opts.ServiceAccount = sa
	opts.ImagePullSecret = paramsMap[ParamImagePullSecret].StringVal
	opts.Bundle = bundle
	opts.EntryName = entryName
	opts.Kind = kind
	// Default to "auto" cache mode; validation happens centrally in the framework.
	if cache := paramsMap[paramCache].StringVal; cache != "" {
		opts.Cache = cache
	} else {
		opts.Cache = "auto"
	}
	return opts, nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bundle
import (
"context"
"errors"
"fmt"
"time"
"github.com/google/go-containerregistry/pkg/authn/k8schain"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/client/injection/kube/client"
)
const (
	// disabledError is the message returned while the enable-bundles-resolver
	// feature flag is not true.
	disabledError = "cannot handle resolution request, enable-bundles-resolver feature flag not true"
	// LabelValueBundleResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueBundleResolverType string = "bundles"
	// BundleResolverName is the name that the bundle resolver should be associated with.
	BundleResolverName = "bundleresolver"
	// ConfigMapName is the bundle resolver's config map
	ConfigMapName = "bundleresolver-config"
)

var _ framework.ConfigWatcher = &Resolver{}

// GetConfigName returns the name of the bundle resolver's configmap.
// (The original comment said "git resolver" — a copy-paste leftover.)
func (r *Resolver) GetConfigName(context.Context) string {
	return ConfigMapName
}
var _ framework.TimedResolution = &Resolver{}

// Resolver implements a framework.Resolver that can fetch files from OCI bundles.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/bundle.Resolver] instead.
type Resolver struct {
	// kubeClientSet is used to build the image pull keychain for bundle
	// requests; populated by Initialize.
	kubeClientSet kubernetes.Interface
}
// Initialize sets up any dependencies needed by the Resolver. None atm.
func (r *Resolver) Initialize(ctx context.Context) error {
	kubeClient := client.Get(ctx)
	r.kubeClientSet = kubeClient
	return nil
}
// GetName returns a string name to refer to this Resolver by.
func (r *Resolver) GetName(context.Context) string {
	return BundleResolverName
}

// GetSelector returns a map of labels to match requests to this Resolver.
func (r *Resolver) GetSelector(context.Context) map[string]string {
	selector := make(map[string]string, 1)
	selector[common.LabelKeyResolverType] = LabelValueBundleResolverType
	return selector
}

// ValidateParams ensures parameters from a request are as expected.
func (r *Resolver) ValidateParams(ctx context.Context, params []v1.Param) error {
	return ValidateParams(ctx, params)
}

// Resolve uses the given params to resolve the requested file or resource.
func (r *Resolver) Resolve(ctx context.Context, params []v1.Param) (framework.ResolvedResource, error) {
	spec := &v1beta1.ResolutionRequestSpec{Params: params}
	return ResolveRequest(ctx, r.kubeClientSet, spec)
}
// ResolveRequest resolves the requested file or resource from an OCI bundle
// using the params in the given ResolutionRequestSpec.
func ResolveRequest(ctx context.Context, kubeClientSet kubernetes.Interface, req *v1beta1.ResolutionRequestSpec) (framework.ResolvedResource, error) {
	if isDisabled(ctx) {
		return nil, errors.New(disabledError)
	}
	opts, err := OptionsFromParams(ctx, req.Params)
	if err != nil {
		return nil, err
	}
	// Build the keychain used to pull the bundle image, scoped to the
	// requesting namespace and the configured service account / pull secret.
	chainOpts := k8schain.Options{
		Namespace:          common.RequestNamespace(ctx),
		ServiceAccountName: opts.ServiceAccount,
	}
	if opts.ImagePullSecret != "" {
		chainOpts.ImagePullSecrets = []string{opts.ImagePullSecret}
	}
	keychain, err := k8schain.New(ctx, kubeClientSet, chainOpts)
	if err != nil {
		return nil, err
	}
	return GetEntry(ctx, keychain, opts)
}
// ValidateParams checks that the given params form a valid bundle request,
// returning an error when the resolver is disabled or the params are invalid.
func ValidateParams(ctx context.Context, params []v1.Param) error {
	if isDisabled(ctx) {
		return errors.New(disabledError)
	}
	_, err := OptionsFromParams(ctx, params)
	return err
}
// isDisabled reports whether the enable-bundles-resolver feature flag is off.
func isDisabled(ctx context.Context) bool {
	return !resolverconfig.FromContextOrDefaults(ctx).FeatureFlags.EnableBundleResolver
}
// GetResolutionTimeout returns a time.Duration for the amount of time a
// single bundle fetch may take. This can be configured with the
// fetch-timeout field in the bundle-resolver-config ConfigMap; when unset,
// defaultTimeout is returned unchanged.
func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
	conf := framework.GetResolverConfigFromContext(ctx)
	configured, ok := conf[ConfigTimeoutKey]
	if !ok {
		return defaultTimeout, nil
	}
	timeout, err := time.ParseDuration(configured)
	if err != nil {
		return 0, fmt.Errorf("error parsing bundle timeout value %s: %w", configured, err)
	}
	return timeout, nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
"encoding/hex"
"errors"
"fmt"
"slices"
"strings"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/logging"
"sigs.k8s.io/yaml"
)
const (
	// disabledError is the message returned while the enable-cluster-resolver
	// feature flag is not true.
	disabledError = "cannot handle resolution request, enable-cluster-resolver feature flag not true"
	// LabelValueClusterResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueClusterResolverType string = "cluster"
	// ClusterResolverName is the name that the cluster resolver should be
	// associated with
	ClusterResolverName string = "Cluster"
	// ConfigMapName is the cluster resolver's config map.
	ConfigMapName = "cluster-resolver-config"
)

// supportedKinds lists the resource kinds the cluster resolver can fetch.
var supportedKinds = []string{"task", "pipeline", "stepaction"}

// Compile-time interface conformance checks.
var _ framework.Resolver = (*Resolver)(nil)
var _ framework.ConfigWatcher = (*Resolver)(nil)
// Resolver implements a framework.Resolver that can fetch resources from other namespaces.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/cluster.Resolver] instead.
type Resolver struct {
	// pipelineClientSet reads Tekton resources from the cluster; populated
	// by Initialize.
	pipelineClientSet clientset.Interface
}
// Initialize performs any setup required by the cluster resolver.
func (r *Resolver) Initialize(ctx context.Context) error {
	clients := pipelineclient.Get(ctx)
	r.pipelineClientSet = clients
	return nil
}

// GetName returns the string name that the cluster resolver should be
// associated with.
func (r *Resolver) GetName(_ context.Context) string {
	return ClusterResolverName
}

// GetSelector returns the labels that resource requests are required to have for
// the cluster resolver to process them.
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
	selector := make(map[string]string, 1)
	selector[common.LabelKeyResolverType] = LabelValueClusterResolverType
	return selector
}

// GetConfigName returns the name of the cluster resolver's configmap.
func (r *Resolver) GetConfigName(context.Context) string {
	return ConfigMapName
}

// ValidateParams returns an error if the given parameter map is not
// valid for a resource request targeting the cluster resolver.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	return ValidateParams(ctx, params)
}

// Resolve performs the work of fetching a resource from a namespace with the given
// parameters.
func (r *Resolver) Resolve(ctx context.Context, origParams []pipelinev1.Param) (framework.ResolvedResource, error) {
	return ResolveFromParams(ctx, origParams, r.pipelineClientSet)
}
// ResolveFromParams performs the work of fetching a task, pipeline, or
// stepaction from a cluster namespace using the given parameters, returning
// it wrapped as a ResolvedClusterResource.
func ResolveFromParams(ctx context.Context, origParams []pipelinev1.Param, pipelineClientSet clientset.Interface) (framework.ResolvedResource, error) {
	if isDisabled(ctx) {
		return nil, errors.New(disabledError)
	}
	logger := logging.FromContext(ctx)
	// Fill in configured defaults (kind/namespace) and enforce the
	// allowed/blocked namespace policy.
	params, err := populateParamsWithDefaults(ctx, origParams)
	if err != nil {
		logger.Infof("cluster resolver parameter(s) invalid: %v", err)
		return nil, err
	}
	// These are populated by the kind-specific fetch helper selected below:
	// uid, full-object YAML, its sha256 checksum, and the spec-only YAML.
	var data []byte
	var spec []byte
	var sha256Checksum []byte
	var uid string
	groupVersion := pipelinev1.SchemeGroupVersion.String()
	switch params[KindParam] {
	case "stepaction":
		// StepActions are fetched via the v1beta1 client and marshalled with
		// the v1beta1 group/version.
		stepaction, err := pipelineClientSet.TektonV1beta1().StepActions(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
		if err != nil {
			logger.Infof("failed to load stepaction %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
			return nil, err
		}
		uid, data, sha256Checksum, spec, err = fetchStepaction(ctx, pipelinev1beta1.SchemeGroupVersion.String(), stepaction, params)
		if err != nil {
			return nil, err
		}
	case "task":
		task, err := pipelineClientSet.TektonV1().Tasks(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
		if err != nil {
			logger.Infof("failed to load task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
			return nil, err
		}
		uid, data, sha256Checksum, spec, err = fetchTask(ctx, groupVersion, task, params)
		if err != nil {
			return nil, err
		}
	case "pipeline":
		pipeline, err := pipelineClientSet.TektonV1().Pipelines(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
		if err != nil {
			logger.Infof("failed to load pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
			return nil, err
		}
		uid, data, sha256Checksum, spec, err = fetchPipeline(ctx, groupVersion, pipeline, params)
		if err != nil {
			return nil, err
		}
	default:
		logger.Infof("unknown or invalid resource kind %s", params[KindParam])
		return nil, fmt.Errorf("unknown or invalid resource kind %s", params[KindParam])
	}
	return &ResolvedClusterResource{
		Content:    data,
		Spec:       spec,
		Name:       params[NameParam],
		Namespace:  params[NamespaceParam],
		Identifier: fmt.Sprintf("/apis/%s/namespaces/%s/%s/%s@%s", groupVersion, params[NamespaceParam], params[KindParam], params[NameParam], uid),
		Checksum:   sha256Checksum,
	}, nil
}
// ResolvedClusterResource implements framework.ResolvedResource and returns
// the resolved file []byte data and an annotation map for any metadata.
type ResolvedClusterResource struct {
	// Content is the actual resolved resource data.
	Content []byte
	// Spec is the data in the resolved task/pipeline CRD spec.
	Spec []byte
	// Name is the resolved resource name in the cluster
	Name string
	// Namespace is the namespace in the cluster under which the resolved resource was created.
	Namespace string
	// Identifier is the unique identifier for the resource in the cluster.
	// It is in the format of <resource uri>@<uid>.
	// Resource URI is the namespace-scoped uri i.e. /apis/GROUP/VERSION/namespaces/NAMESPACE/RESOURCETYPE/NAME.
	// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-uris
	Identifier string
	// Checksum is the sha256 checksum of the cluster resource, as produced
	// by the resource's Checksum() method (see the fetch* helpers).
	Checksum []byte
}
var _ framework.ResolvedResource = &ResolvedClusterResource{}

// Data returns the bytes of the resource fetched from the cluster.
// (Previous comment said "from git" — a copy-paste leftover from the git resolver.)
func (r *ResolvedClusterResource) Data() []byte {
	return r.Content
}

// Annotations returns the metadata that accompanies the resource fetched from the cluster.
func (r *ResolvedClusterResource) Annotations() map[string]string {
	return map[string]string{
		ResourceNameAnnotation:      r.Name,
		ResourceNamespaceAnnotation: r.Namespace,
	}
}
// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint.
//
// NOTE(review): this method uses a value receiver while Data and Annotations
// use pointer receivers. *ResolvedClusterResource still satisfies the
// interface either way; consider unifying the receivers, but confirm no
// caller stores the value type in an interface before changing it.
func (r ResolvedClusterResource) RefSource() *pipelinev1.RefSource {
	return &pipelinev1.RefSource{
		URI: r.Identifier,
		Digest: map[string]string{
			"sha256": hex.EncodeToString(r.Checksum),
		},
	}
}
// populateParamsWithDefaults converts the request params into a plain map,
// filling in kind and namespace from the resolver config when not supplied,
// and enforces the allowed/blocked namespace policy from the config.
//
// It returns an error when required params are missing, the kind is not
// supported, or the target namespace is not permitted.
func populateParamsWithDefaults(ctx context.Context, origParams []pipelinev1.Param) (map[string]string, error) {
	conf := framework.GetResolverConfigFromContext(ctx)
	paramsMap := make(map[string]pipelinev1.ParamValue)
	for _, p := range origParams {
		paramsMap[p.Name] = p.Value
	}
	params := make(map[string]string)
	var missingParams []string
	// Kind: explicit param wins, then the configured default.
	if pKind, ok := paramsMap[KindParam]; !ok || pKind.StringVal == "" {
		if kindVal, ok := conf[DefaultKindKey]; !ok {
			missingParams = append(missingParams, KindParam)
		} else {
			params[KindParam] = kindVal
		}
	} else {
		params[KindParam] = pKind.StringVal
	}
	if kindVal, ok := params[KindParam]; ok && !isSupportedKind(kindVal) {
		return nil, fmt.Errorf("unknown or unsupported resource kind '%s'", kindVal)
	}
	// Name is always required; it has no configured default.
	if pName, ok := paramsMap[NameParam]; !ok || pName.StringVal == "" {
		missingParams = append(missingParams, NameParam)
	} else {
		params[NameParam] = pName.StringVal
	}
	// Namespace: explicit param wins, then the configured default.
	if pNS, ok := paramsMap[NamespaceParam]; !ok || pNS.StringVal == "" {
		if nsVal, ok := conf[DefaultNamespaceKey]; !ok {
			missingParams = append(missingParams, NamespaceParam)
		} else {
			params[NamespaceParam] = nsVal
		}
	} else {
		params[NamespaceParam] = pNS.StringVal
	}
	if len(missingParams) > 0 {
		return nil, fmt.Errorf("missing required cluster resolver params: %s", strings.Join(missingParams, ", "))
	}
	// Namespace policy, evaluated in order:
	//  1. an explicit block-list entry rejects the namespace;
	//  2. an explicit allow-list entry accepts it;
	//  3. a block list of "*" rejects everything not explicitly allowed;
	//  4. a non-empty allow list rejects anything not on it.
	if conf[BlockedNamespacesKey] != "" && isInCommaSeparatedList(params[NamespaceParam], conf[BlockedNamespacesKey]) {
		return nil, fmt.Errorf("access to specified namespace %s is blocked", params[NamespaceParam])
	}
	if conf[AllowedNamespacesKey] != "" && isInCommaSeparatedList(params[NamespaceParam], conf[AllowedNamespacesKey]) {
		return params, nil
	}
	// ("*" is non-empty, so the previous additional != "" check was redundant.)
	if conf[BlockedNamespacesKey] == "*" {
		return nil, errors.New("only explicit allowed access to namespaces is allowed")
	}
	if conf[AllowedNamespacesKey] != "" && !isInCommaSeparatedList(params[NamespaceParam], conf[AllowedNamespacesKey]) {
		return nil, fmt.Errorf("access to specified namespace %s is not allowed", params[NamespaceParam])
	}
	return params, nil
}
// isInCommaSeparatedList reports whether checkVal appears as an exact element
// of the comma-separated list in commaList. Elements are not trimmed, so
// surrounding whitespace is significant.
func isInCommaSeparatedList(checkVal string, commaList string) bool {
	return slices.Contains(strings.Split(commaList, ","), checkVal)
}
// isDisabled reports whether the enable-cluster-resolver feature flag is off.
func isDisabled(ctx context.Context) bool {
	return !resolverconfig.FromContextOrDefaults(ctx).FeatureFlags.EnableClusterResolver
}

// ValidateParams checks that the given params form a valid cluster resolution
// request, returning an error when the resolver is disabled or the params are
// invalid.
func ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	if isDisabled(ctx) {
		return errors.New(disabledError)
	}
	if _, err := populateParamsWithDefaults(ctx, params); err != nil {
		return err
	}
	return nil
}
// fetchStepaction marshals the given StepAction into YAML, returning the
// object's UID, the full YAML, its sha256 checksum, and the spec-only YAML.
func fetchStepaction(ctx context.Context, groupVersion string, stepaction *pipelinev1beta1.StepAction, params map[string]string) (string, []byte, []byte, []byte, error) {
	logger := logging.FromContext(ctx)
	uid := string(stepaction.UID)
	// Kind/APIVersion may be empty on objects read from the cluster, so set
	// them explicitly before marshalling.
	stepaction.Kind = "StepAction"
	stepaction.APIVersion = groupVersion
	data, err := yaml.Marshal(stepaction)
	if err != nil {
		logger.Infof("failed to marshal stepaction %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
		return "", nil, nil, nil, err
	}
	sha256Checksum, err := stepaction.Checksum()
	if err != nil {
		return "", nil, nil, nil, err
	}
	spec, err := yaml.Marshal(stepaction.Spec)
	if err != nil {
		// Fixed: this log message previously said "task" instead of "stepaction".
		logger.Infof("failed to marshal the spec of the stepaction %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
		return "", nil, nil, nil, err
	}
	return uid, data, sha256Checksum, spec, nil
}
// fetchTask marshals the given Task into YAML, returning the object's UID,
// the full YAML, its sha256 checksum, and the spec-only YAML.
func fetchTask(ctx context.Context, groupVersion string, task *pipelinev1.Task, params map[string]string) (string, []byte, []byte, []byte, error) {
	logger := logging.FromContext(ctx)
	// Kind/APIVersion may be empty on objects read from the cluster, so set
	// them explicitly before marshalling.
	task.Kind = "Task"
	task.APIVersion = groupVersion
	data, marshalErr := yaml.Marshal(task)
	if marshalErr != nil {
		logger.Infof("failed to marshal task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], marshalErr)
		return "", nil, nil, nil, marshalErr
	}
	checksum, checksumErr := task.Checksum()
	if checksumErr != nil {
		return "", nil, nil, nil, checksumErr
	}
	spec, specErr := yaml.Marshal(task.Spec)
	if specErr != nil {
		logger.Infof("failed to marshal the spec of the task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], specErr)
		return "", nil, nil, nil, specErr
	}
	return string(task.UID), data, checksum, spec, nil
}
// fetchPipeline marshals the given Pipeline into YAML, returning the object's
// UID, the full YAML, its sha256 checksum, and the spec-only YAML.
func fetchPipeline(ctx context.Context, groupVersion string, pipeline *pipelinev1.Pipeline, params map[string]string) (string, []byte, []byte, []byte, error) {
	logger := logging.FromContext(ctx)
	// Kind/APIVersion may be empty on objects read from the cluster, so set
	// them explicitly before marshalling.
	pipeline.Kind = "Pipeline"
	pipeline.APIVersion = groupVersion
	data, marshalErr := yaml.Marshal(pipeline)
	if marshalErr != nil {
		logger.Infof("failed to marshal pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], marshalErr)
		return "", nil, nil, nil, marshalErr
	}
	checksum, checksumErr := pipeline.Checksum()
	if checksumErr != nil {
		return "", nil, nil, nil, checksumErr
	}
	spec, specErr := yaml.Marshal(pipeline.Spec)
	if specErr != nil {
		logger.Infof("failed to marshal the spec of the pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], specErr)
		return "", nil, nil, nil, specErr
	}
	return string(pipeline.UID), data, checksum, spec, nil
}
// isSupportedKind reports whether kindValue names one of the resource kinds
// this resolver can fetch (see supportedKinds).
func isSupportedKind(kindValue string) bool {
	// Type inference makes the explicit instantiation
	// slices.Contains[[]string, string] redundant.
	return slices.Contains(supportedKinds, kindValue)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
corev1 "k8s.io/api/core/v1"
"knative.dev/pkg/configmap"
)
// resolverConfigContextKey is an unexported type for the context key below.
// Using a distinct named type (rather than a bare struct{}{} value) prevents
// collisions with identically-shaped keys defined in other packages, per the
// context.WithValue documentation.
type resolverConfigContextKey struct{}

// resolverConfigKey is the context key associated with configuration
// for one specific resolver, and is only used if that resolver
// implements the optional framework.ConfigWatcher interface.
var resolverConfigKey = resolverConfigContextKey{}
// DataFromConfigMap returns a copy of the contents of a configmap or an
// empty map if the configmap doesn't have any data. The error return is
// always nil; the signature matches configmap.Constructors.
func DataFromConfigMap(config *corev1.ConfigMap) (map[string]string, error) {
	if config == nil {
		return map[string]string{}, nil
	}
	out := make(map[string]string, len(config.Data))
	for key, val := range config.Data {
		out[key] = val
	}
	return out, nil
}
// ConfigStore wraps a knative untyped store and provides helper methods
// for working with a resolver's configuration data.
type ConfigStore struct {
	// Store holds the general Pipeline resolver configuration (embedded).
	*resolverconfig.Store
	// resolverConfigName is the configmap name this store watches.
	resolverConfigName string
	// untyped holds the raw key/value data loaded from that configmap.
	untyped *configmap.UntypedStore
}
// NewConfigStore creates a new untyped store for the resolver's configuration
// and a config.Store for general Pipeline configuration.
func NewConfigStore(resolverConfigName string, logger configmap.Logger) *ConfigStore {
	untyped := configmap.NewUntypedStore(
		"resolver-config",
		logger,
		// Map the resolver's configmap name to the plain copy constructor.
		configmap.Constructors{resolverConfigName: DataFromConfigMap},
	)
	return &ConfigStore{
		Store:              resolverconfig.NewStore(logger),
		resolverConfigName: resolverConfigName,
		untyped:            untyped,
	}
}
// WatchConfigs uses the provided configmap.Watcher
// to setup watches for the config names provided in the
// Constructors map
func (store *ConfigStore) WatchConfigs(w configmap.Watcher) {
	// Register both the resolver-specific untyped watch and the general
	// Pipeline resolver config watch on the same watcher.
	store.untyped.WatchConfigs(w)
	store.Store.WatchConfigs(w)
}
// GetResolverConfig returns a copy of the resolver's current
// configuration or an empty map if the stored config is nil or invalid.
func (store *ConfigStore) GetResolverConfig() map[string]string {
	out := map[string]string{}
	if conf, ok := store.untyped.UntypedLoad(store.resolverConfigName).(map[string]string); ok {
		for k, v := range conf {
			out[k] = v
		}
	}
	return out
}
// ToContext returns a new context carrying both the general Pipeline
// resolver configuration and a copy of this resolver's configuration data.
func (store *ConfigStore) ToContext(ctx context.Context) context.Context {
	return InjectResolverConfigToContext(store.Store.ToContext(ctx), store.GetResolverConfig())
}
// InjectResolverConfigToContext returns a new context with a
// map stored in it for a resolvers config.
func InjectResolverConfigToContext(ctx context.Context, conf map[string]string) context.Context {
	// The map is stored as-is (not copied); callers pass a fresh copy.
	return context.WithValue(ctx, resolverConfigKey, conf)
}
// GetResolverConfigFromContext returns any resolver-specific
// configuration that has been stored or an empty map if none exists.
func GetResolverConfigFromContext(ctx context.Context) map[string]string {
	if conf, ok := ctx.Value(resolverConfigKey).(map[string]string); ok {
		return conf
	}
	return map[string]string{}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
rrinformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
rrlister "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/resolution/common"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
"knative.dev/pkg/reconciler"
)
// ReconcilerModifier is a func that can access and modify a reconciler
// in the moments before a resolver is started. It allows for
// things like injecting a test clock.
type ReconcilerModifier = func(reconciler *Reconciler)
// NewController returns a knative controller constructor for a Tekton Resolver.
// This sets up a lot of the boilerplate that individual resolvers
// shouldn't need to be concerned with since it's common to all of them.
//
// It panics if the resolver's selector is invalid or if the resolver fails
// to initialize, since neither is recoverable at startup.
func NewController(ctx context.Context, resolver Resolver, modifiers ...ReconcilerModifier) func(context.Context, configmap.Watcher) *controller.Impl {
	if err := ValidateResolver(ctx, resolver.GetSelector(ctx)); err != nil {
		panic(err.Error())
	}
	return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
		logger := logging.FromContext(ctx)
		kubeclientset := kubeclient.Get(ctx)
		rrclientset := rrclient.Get(ctx)
		rrInformer := rrinformer.Get(ctx)

		if err := resolver.Initialize(ctx); err != nil {
			panic(err.Error())
		}

		r := &Reconciler{
			LeaderAwareFuncs:           LeaderAwareFuncs(rrInformer.Lister()),
			kubeClientSet:              kubeclientset,
			resolutionRequestLister:    rrInformer.Lister(),
			resolutionRequestClientSet: rrclientset,
			resolver:                   resolver,
		}
		watchConfigChanges(ctx, r, cmw)

		// The resolver name becomes part of the work-queue name below.
		// TODO(sbwsg): Do better sanitize.
		resolverName := resolver.GetName(ctx)
		resolverName = strings.ReplaceAll(resolverName, "/", "")
		resolverName = strings.ReplaceAll(resolverName, " ", "")

		applyModifiersAndDefaults(ctx, r, modifiers)

		impl := controller.NewContext(ctx, r, controller.ControllerOptions{
			WorkQueueName: "TektonResolverFramework." + resolverName,
			Logger:        logger,
		})

		// Only requests matching the resolver's selector are enqueued.
		_, err := rrInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
			FilterFunc: FilterResolutionRequestsBySelector(resolver.GetSelector(ctx)),
			Handler: cache.ResourceEventHandlerFuncs{
				AddFunc: impl.Enqueue,
				UpdateFunc: func(oldObj, newObj interface{}) {
					impl.Enqueue(newObj)
				},
				// TODO(sbwsg): should we deliver delete events
				// to the resolver?
				// DeleteFunc: impl.Enqueue,
			},
		})
		if err != nil {
			// Panicf uses fmt.Sprintf-style formatting, which does not
			// support %w; %v prints the error correctly.
			logging.FromContext(ctx).Panicf("Couldn't register ResolutionRequest informer event handler: %v", err)
		}

		return impl
	}
}
// watchConfigChanges binds a framework.Resolver to updates on its
// configmap, using knative's configmap helpers. This is only done if
// the resolver implements the framework.ConfigWatcher interface.
func watchConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configmap.Watcher) {
	configWatcher, ok := reconciler.resolver.(ConfigWatcher)
	if !ok {
		// Resolver doesn't opt in to config watching; nothing to do.
		return
	}
	name := configWatcher.GetConfigName(ctx)
	if name == "" {
		panic("resolver returned empty config name")
	}
	reconciler.configStore = NewConfigStore(name, logging.FromContext(ctx))
	reconciler.configStore.WatchConfigs(cmw)
}
// applyModifiersAndDefaults applies the given modifiers to
// a reconciler and, after doing so, sets any default values for things
// that weren't set by a modifier.
func applyModifiersAndDefaults(ctx context.Context, r *Reconciler, modifiers []ReconcilerModifier) {
	for _, modify := range modifiers {
		modify(r)
	}
	// Fall back to a real clock unless a modifier (e.g. a test) set one.
	if r.Clock == nil {
		r.Clock = clock.RealClock{}
	}
}
// FilterResolutionRequestsBySelector returns an informer event filter that
// accepts only ResolutionRequests whose labels include every key/value pair
// in selector. Objects of other types, and requests without labels, are
// rejected.
func FilterResolutionRequestsBySelector(selector map[string]string) func(obj interface{}) bool {
	return func(obj interface{}) bool {
		rr, ok := obj.(*v1beta1.ResolutionRequest)
		if !ok || len(rr.ObjectMeta.Labels) == 0 {
			return false
		}
		for wantKey, wantVal := range selector {
			gotVal, found := rr.ObjectMeta.Labels[wantKey]
			if !found || gotVal != wantVal {
				return false
			}
		}
		return true
	}
}
// LeaderAwareFuncs returns the leader-aware callbacks required by the
// framework reconciler. On promotion it enqueues every ResolutionRequest
// visible through the lister into the promoted bucket.
//
// TODO(sbwsg): I don't really understand the LeaderAwareness types beyond the
// fact that the controller crashes if they're missing. It looks
// like this is bucketing based on labels. Should we use the filter
// selector from above in the call to lister.List here?
func LeaderAwareFuncs(lister rrlister.ResolutionRequestLister) reconciler.LeaderAwareFuncs {
	promote := func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
		requests, err := lister.List(labels.Everything())
		if err != nil {
			return err
		}
		for _, req := range requests {
			enq(bkt, types.NamespacedName{
				Namespace: req.GetNamespace(),
				Name:      req.GetName(),
			})
		}
		return nil
	}
	return reconciler.LeaderAwareFuncs{PromoteFunc: promote}
}
// ErrMissingTypeSelector is returned when a resolver does not return
// a selector with a type label from its GetSelector method.
var ErrMissingTypeSelector = fmt.Errorf("invalid resolver: minimum selector must include %q", common.LabelKeyResolverType)
func ValidateResolver(ctx context.Context, sel map[string]string) error {
if sel == nil {
return ErrMissingTypeSelector
}
if sel[common.LabelKeyResolverType] == "" {
return ErrMissingTypeSelector
}
return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"errors"
"fmt"
"strings"
"time"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
)
const (
	// LabelValueFakeResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueFakeResolverType string = "fake"

	// FakeResolverName is the name that the fake resolver should be
	// associated with
	FakeResolverName string = "Fake"

	// FakeParamName is the name used for the fake resolver's single parameter.
	FakeParamName string = "fake-key"
)
// Compile-time check that FakeResolver satisfies the Resolver interface.
var _ Resolver = &FakeResolver{}

// FakeResolvedResource is a framework.ResolvedResource implementation for use with the fake resolver.
// If it's the value in the FakeResolver's ForParam map for the key given as the fake param value, the FakeResolver will
// first check if it's got a value for ErrorWith. If so, that string will be returned as an error. Then, if WaitFor is
// greater than zero, the FakeResolver will wait that long before returning. And finally, the FakeResolvedResource will
// be returned.
type FakeResolvedResource struct {
	// Content is the resolved payload returned by Data().
	Content string
	// AnnotationMap is returned verbatim by Annotations().
	AnnotationMap map[string]string
	// ContentSource is returned verbatim by RefSource().
	ContentSource *pipelinev1.RefSource
	// ErrorWith, when non-empty, makes Resolve fail with this message.
	ErrorWith string
	// WaitFor, when positive, makes Resolve sleep before returning.
	WaitFor time.Duration
}
// Data returns the FakeResolvedResource's Content field as bytes.
func (f *FakeResolvedResource) Data() []byte {
	return []byte(f.Content)
}

// Annotations returns the FakeResolvedResource's AnnotationMap field.
func (f *FakeResolvedResource) Annotations() map[string]string {
	return f.AnnotationMap
}

// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint.
func (f *FakeResolvedResource) RefSource() *pipelinev1.RefSource {
	return f.ContentSource
}
// FakeResolver implements a framework.Resolver that can fetch pre-configured strings based on a parameter value, or return
// resolution attempts with a configured error.
type FakeResolver struct {
	// ForParam maps fake-key param values to the resources to return.
	ForParam map[string]*FakeResolvedResource
	// Timeout overrides the default resolution timeout when positive.
	Timeout time.Duration
}
// Initialize performs any setup required by the fake resolver, ensuring the
// ForParam map is non-nil so later writes don't panic.
func (r *FakeResolver) Initialize(ctx context.Context) error {
	if r.ForParam == nil {
		r.ForParam = map[string]*FakeResolvedResource{}
	}
	return nil
}
// GetName returns the string name that the fake resolver should be
// associated with.
func (r *FakeResolver) GetName(_ context.Context) string {
	return FakeResolverName
}

// GetSelector returns the labels that resource requests are required to have for
// the fake resolver to process them.
func (r *FakeResolver) GetSelector(_ context.Context) map[string]string {
	return map[string]string{
		resolutioncommon.LabelKeyResolverType: LabelValueFakeResolverType,
	}
}

// ValidateParams returns an error if the given parameter map is not
// valid for a resource request targeting the fake resolver.
func (r *FakeResolver) ValidateParams(_ context.Context, params []pipelinev1.Param) error {
	// Delegates to the package-level helper so tests can call it directly.
	return ValidateParams(params)
}
// ValidateParams checks that params supplies a non-empty value for every
// parameter the fake resolver requires, returning an error that names any
// missing or empty parameters.
func ValidateParams(params []pipelinev1.Param) error {
	byName := make(map[string]pipelinev1.ParamValue)
	for _, p := range params {
		byName[p.Name] = p.Value
	}
	required := []string{FakeParamName}
	var missing []string
	if params == nil {
		// No params at all: everything required is missing.
		missing = required
	} else {
		for _, name := range required {
			if v, ok := byName[name]; !ok || v.StringVal == "" {
				missing = append(missing, name)
			}
		}
	}
	if len(missing) > 0 {
		return fmt.Errorf("missing %v", strings.Join(missing, ", "))
	}
	return nil
}
// Resolve performs the work of fetching a file from the fake resolver given a map of
// parameters.
func (r *FakeResolver) Resolve(_ context.Context, params []pipelinev1.Param) (ResolvedResource, error) {
	// Delegates to the package-level helper so tests can call it directly.
	return Resolve(params, r.ForParam)
}
// Resolve looks up the FakeResolvedResource configured for the request's
// fake-key param value. It returns an error when no resource is configured
// for that value or when the resource's ErrorWith is set; otherwise it sleeps
// for WaitFor (if positive) and returns the resource.
func Resolve(params []pipelinev1.Param, forParam map[string]*FakeResolvedResource) (ResolvedResource, error) {
	paramsMap := make(map[string]pipelinev1.ParamValue)
	for _, p := range params {
		paramsMap[p.Name] = p.Value
	}
	paramValue := paramsMap[FakeParamName].StringVal
	frr, ok := forParam[paramValue]
	if !ok {
		return nil, fmt.Errorf("couldn't find resource for param value %s", paramValue)
	}
	if frr.ErrorWith != "" {
		return nil, errors.New(frr.ErrorWith)
	}
	// Compare the duration directly instead of the unidiomatic
	// frr.WaitFor.Seconds() > 0 (same truth table, clearer intent).
	if frr.WaitFor > 0 {
		time.Sleep(frr.WaitFor)
	}
	return frr, nil
}
// Compile-time check that FakeResolver satisfies TimedResolution.
var _ TimedResolution = &FakeResolver{}

// GetResolutionTimeout returns the configured timeout for the reconciler, or the default time.Duration if not configured.
func (r *FakeResolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
	return GetResolutionTimeout(r.Timeout, defaultTimeout), nil
}
// GetResolutionTimeout returns timeout when it is positive, and
// defaultTimeout otherwise.
func GetResolutionTimeout(timeout, defaultTimeout time.Duration) time.Duration {
	if timeout <= 0 {
		return defaultTimeout
	}
	return timeout
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"time"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
rrv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
"knative.dev/pkg/reconciler"
)
// Reconciler handles ResolutionRequest objects, performs functionality
// common to all resolvers and delegates resolver-specific actions
// to its embedded type-specific Resolver object.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework.Reconciler] instead.
type Reconciler struct {
	// Implements reconciler.LeaderAware
	reconciler.LeaderAwareFuncs

	// Clock is used by the reconciler to track the passage of time
	// and can be overridden for tests.
	Clock clock.PassiveClock

	// resolver is the type-specific implementation doing the real work.
	resolver Resolver
	// kubeClientSet talks to the core Kubernetes API.
	kubeClientSet kubernetes.Interface
	// resolutionRequestLister reads ResolutionRequests from the informer cache.
	resolutionRequestLister rrv1beta1.ResolutionRequestLister
	// resolutionRequestClientSet writes ResolutionRequest status updates.
	resolutionRequestClientSet rrclient.Interface
	// configStore is non-nil only when the resolver watches a configmap.
	configStore *ConfigStore
}

// Compile-time check that Reconciler is leader-aware.
var _ reconciler.LeaderAware = &Reconciler{}
// defaultMaximumResolutionDuration is the max time that a call to
// Resolve() may take. It can be overridden by a resolver implementing
// the framework.TimedResolution interface.
const defaultMaximumResolutionDuration = time.Minute
// Reconcile receives the string key of a ResolutionRequest object, looks
// it up, checks it for common errors, and then delegates
// resolver-specific functionality to the reconciler's embedded
// type-specific resolver. Any errors that occur during validation or
// resolution are handled by updating or failing the ResolutionRequest.
func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
	namespace, name, splitErr := cache.SplitMetaNamespaceKey(key)
	if splitErr != nil {
		return controller.NewPermanentError(&resolutioncommon.InvalidResourceKeyError{Key: key, Original: splitErr})
	}

	rr, getErr := r.resolutionRequestLister.ResolutionRequests(namespace).Get(name)
	if getErr != nil {
		return controller.NewPermanentError(&resolutioncommon.GetResourceError{ResolverName: "resolutionrequest", Key: key, Original: getErr})
	}

	// Requests that already reached a terminal state need no further work.
	if rr.IsDone() {
		return nil
	}

	// Inject request-scoped information into the context, such as
	// the namespace that the request originates from and the
	// configuration from the configmap this resolver is watching.
	ctx = resolutioncommon.InjectRequestNamespace(ctx, namespace)
	ctx = resolutioncommon.InjectRequestName(ctx, name)
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	return r.resolve(ctx, key, rr)
}
// resolve runs param validation and resolution via the embedded resolver in
// a goroutine, racing it against a timeout, and records the outcome on the
// ResolutionRequest.
func (r *Reconciler) resolve(ctx context.Context, key string, rr *v1beta1.ResolutionRequest) error {
	// The channels are buffered (capacity 1) so the worker goroutine can
	// always complete its single send and exit, even when the timeout
	// fires first and no receive ever happens. With unbuffered channels
	// the goroutine would leak, blocked on the send forever.
	errChan := make(chan error, 1)
	resourceChan := make(chan ResolvedResource, 1)

	paramsMap := make(map[string]string)
	for _, p := range rr.Spec.Params {
		paramsMap[p.Name] = p.Value.StringVal
	}

	timeoutDuration := defaultMaximumResolutionDuration
	if timed, ok := r.resolver.(TimedResolution); ok {
		var err error
		timeoutDuration, err = timed.GetResolutionTimeout(ctx, defaultMaximumResolutionDuration, paramsMap)
		if err != nil {
			return err
		}
	}

	// A new context is created for resolution so that timeouts can
	// be enforced without affecting other uses of ctx (e.g. sending
	// Updates to ResolutionRequest objects).
	resolutionCtx, cancelFn := context.WithTimeout(ctx, timeoutDuration)
	defer cancelFn()

	go func() {
		validationError := r.resolver.ValidateParams(resolutionCtx, rr.Spec.Params)
		if validationError != nil {
			errChan <- &resolutioncommon.InvalidRequestError{
				ResolutionRequestKey: key,
				Message:              validationError.Error(),
			}
			return
		}
		resource, resolveErr := r.resolver.Resolve(resolutionCtx, rr.Spec.Params)
		if resolveErr != nil {
			errChan <- &resolutioncommon.GetResourceError{
				ResolverName: r.resolver.GetName(resolutionCtx),
				Key:          key,
				Original:     resolveErr,
			}
			return
		}
		resourceChan <- resource
	}()

	select {
	case err := <-errChan:
		if err != nil {
			return r.OnError(ctx, rr, err)
		}
	case <-resolutionCtx.Done():
		if err := resolutionCtx.Err(); err != nil {
			return r.OnError(ctx, rr, err)
		}
	case resource := <-resourceChan:
		return r.writeResolvedData(ctx, rr, resource)
	}

	// Unreachable in practice: every arm above either returns or received
	// a nil value the goroutine never sends; kept as a defensive fallback.
	return errors.New("unknown error")
}
// OnError is used to handle any situation where a ResolutionRequest has
// reached a terminal situation that cannot be recovered from.
func (r *Reconciler) OnError(ctx context.Context, rr *v1beta1.ResolutionRequest, err error) error {
	switch {
	case rr == nil:
		// No request to update; just stop requeueing.
		return controller.NewPermanentError(err)
	case err != nil:
		// Best-effort status update; the permanent error is returned
		// regardless so the request isn't requeued.
		_ = r.MarkFailed(ctx, rr, err)
		return controller.NewPermanentError(err)
	default:
		return nil
	}
}
// MarkFailed updates a ResolutionRequest as having failed. It returns
// errors that occur during the update process or nil if the update
// appeared to succeed.
func (r *Reconciler) MarkFailed(ctx context.Context, rr *v1beta1.ResolutionRequest, resolutionErr error) error {
	key := fmt.Sprintf("%s/%s", rr.Namespace, rr.Name)
	reason, resolutionErr := resolutioncommon.ReasonError(resolutionErr)
	requests := r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace)

	// Refetch before updating so we don't clobber a newer generation.
	latest, err := requests.Get(ctx, rr.Name, metav1.GetOptions{})
	if err != nil {
		logging.FromContext(ctx).Warnf("error getting latest generation of resolutionrequest %q: %v", key, err)
		return err
	}
	// Another reconcile may have completed the request in the meantime.
	if latest.IsDone() {
		return nil
	}

	latest.Status.MarkFailed(reason, resolutionErr.Error())
	if _, err := requests.UpdateStatus(ctx, latest, metav1.UpdateOptions{}); err != nil {
		logging.FromContext(ctx).Warnf("error marking resolutionrequest %q as failed: %v", key, err)
		return err
	}
	return nil
}
// statusDataPatch is the json structure that will be PATCHed into
// a ResolutionRequest with its data and annotations once successfully
// resolved.
type statusDataPatch struct {
	// Annotations carries the resolver's annotations verbatim.
	Annotations map[string]string `json:"annotations"`
	// Data is the base64-encoded resolved content.
	Data string `json:"data"`
	// Source mirrors RefSource under the older field name.
	// NOTE(review): appears kept alongside RefSource for older consumers — confirm.
	Source *pipelinev1beta1.ConfigSource `json:"source"`
	// RefSource records where the remote content came from.
	RefSource *pipelinev1.RefSource `json:"refSource"`
}
// writeResolvedData patches the ResolutionRequest's status with the resolved
// content (base64-encoded), its annotations, and its source reference.
// Failures are routed through OnError so the request is marked failed.
func (r *Reconciler) writeResolvedData(ctx context.Context, rr *v1beta1.ResolutionRequest, resource ResolvedResource) error {
	requestKey := fmt.Sprintf("%s/%s", rr.Namespace, rr.Name)
	refSource := resource.RefSource()
	patch := map[string]statusDataPatch{
		"status": {
			Data:        base64.StdEncoding.Strict().EncodeToString(resource.Data()),
			Annotations: resource.Annotations(),
			RefSource:   refSource,
			// Source carries the same value under the older field name.
			Source: (*pipelinev1beta1.ConfigSource)(refSource),
		},
	}
	patchBytes, err := json.Marshal(patch)
	if err != nil {
		return r.OnError(ctx, rr, &resolutioncommon.UpdatingRequestError{
			ResolutionRequestKey: requestKey,
			Original:             fmt.Errorf("error serializing resource request patch: %w", err),
		})
	}
	if _, err := r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Patch(ctx, rr.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil {
		return r.OnError(ctx, rr, &resolutioncommon.UpdatingRequestError{
			ResolutionRequestKey: requestKey,
			Original:             err,
		})
	}
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"encoding/base64"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
"github.com/tektoncd/pipeline/test/names"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
testclock "k8s.io/utils/clock/testing"
"knative.dev/pkg/apis"
cminformer "knative.dev/pkg/configmap/informer"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
pkgreconciler "knative.dev/pkg/reconciler"
"knative.dev/pkg/system"
)
var (
	// now is the fixed instant the fake clock reports in these tests.
	now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
	testClock = testclock.NewFakePassiveClock(now)
	// ignoreLastTransitionTime keeps condition-timestamp churn out of diffs.
	ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time")
)

// ResolverReconcileTestModifier is a function that will be invoked after the test assets and controller have been created
type ResolverReconcileTestModifier = func(resolver framework.Resolver, testAssets test.Assets)
// RunResolverReconcileTest takes data to seed clients and informers, a Resolver, a ResolutionRequest, and the expected
// ResolutionRequestStatus and error, both of which can be nil. It instantiates a controller for that resolver and
// reconciles the given request. It then checks for the expected error, if any, and compares the resulting status with
// the expected status.
func RunResolverReconcileTest(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, request *v1beta1.ResolutionRequest,
	expectedStatus *v1beta1.ResolutionRequestStatus, expectedErr error, resolverModifiers ...ResolverReconcileTestModifier) {
	t.Helper()

	testAssets, cancel := GetResolverFrameworkController(ctx, t, d, resolver, setClockOnReconciler)
	defer cancel()

	// Let callers tweak the resolver/assets after controller setup.
	for _, rm := range resolverModifiers {
		rm(resolver, testAssets)
	}

	err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRequestName(request))
	if expectedErr != nil {
		if err == nil {
			t.Fatalf("expected to get error: `%v`, but got nothing", expectedErr)
		}
		if expectedErr.Error() != err.Error() {
			t.Fatalf("expected to get error `%v`, but got `%v`", expectedErr, err)
		}
	} else if err != nil {
		// Requeue signals are not failures; anything else is.
		if ok, _ := controller.IsRequeueKey(err); !ok {
			t.Fatalf("did not expect an error, but got `%v`", err)
		}
	}

	c := testAssets.Clients.ResolutionRequests.ResolutionV1beta1()
	reconciledRR, err := c.ResolutionRequests(request.Namespace).Get(testAssets.Ctx, request.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("getting updated ResolutionRequest: %v", err)
	}
	if expectedStatus != nil {
		if d := cmp.Diff(*expectedStatus, reconciledRR.Status, ignoreLastTransitionTime); d != "" {
			t.Errorf("ResolutionRequest status doesn't match %s", diff.PrintWantGot(d))
			// On mismatch, additionally diff the base64-decoded payloads
			// so the failure message shows readable content.
			if expectedStatus.Data != "" && expectedStatus.Data != reconciledRR.Status.Data {
				decodedExpectedData, err := base64.StdEncoding.Strict().DecodeString(expectedStatus.Data)
				if err != nil {
					t.Errorf("couldn't decode expected data: %v", err)
					return
				}
				decodedGotData, err := base64.StdEncoding.Strict().DecodeString(reconciledRR.Status.Data)
				if err != nil {
					t.Errorf("couldn't decode reconciled data: %v", err)
					return
				}
				if d := cmp.Diff(decodedExpectedData, decodedGotData); d != "" {
					t.Errorf("decoded data did not match expected: %s", diff.PrintWantGot(d))
				}
			}
		}
	}
}
// GetResolverFrameworkController returns an instance of the resolver framework controller/reconciler using the given resolver,
// seeded with d, where d represents the state of the system (existing resources) needed for the test.
func GetResolverFrameworkController(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, modifiers ...framework.ReconcilerModifier) (test.Assets, func()) {
	t.Helper()
	// Seed deterministic name generation before building test assets.
	names.TestingSeed()
	return initializeResolverFrameworkControllerAssets(ctx, t, d, resolver, modifiers...)
}
// initializeResolverFrameworkControllerAssets seeds clients/informers from d,
// builds the framework controller for the given resolver, starts its configmap
// watcher, promotes the reconciler to leader, and returns the assembled test
// assets plus a cancel func that tears the context down.
func initializeResolverFrameworkControllerAssets(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, modifiers ...framework.ReconcilerModifier) (test.Assets, func()) {
	t.Helper()
	ctx, cancel := context.WithCancel(ctx)
	// Make sure the configmaps the watcher expects are present in d.
	ensureConfigurationConfigMapsExist(&d)
	c, informers := test.SeedTestData(t, ctx, d)
	configMapWatcher := cminformer.NewInformedWatcher(c.Kube, resolverconfig.ResolversNamespace(system.Namespace()))
	ctl := framework.NewController(ctx, resolver, modifiers...)(ctx, configMapWatcher)
	if err := configMapWatcher.Start(ctx.Done()); err != nil {
		t.Fatalf("error starting configmap watcher: %v", err)
	}

	// Promote so the leader-aware reconciler actually processes work.
	if la, ok := ctl.Reconciler.(pkgreconciler.LeaderAware); ok {
		_ = la.Promote(pkgreconciler.UniversalBucket(), func(pkgreconciler.Bucket, types.NamespacedName) {})
	}

	return test.Assets{
		Logger:     logging.FromContext(ctx),
		Controller: ctl,
		Clients:    c,
		Informers:  informers,
		Recorder:   controller.GetEventRecorder(ctx).(*record.FakeRecorder),
		Ctx:        ctx,
	}, cancel
}
// getRequestName returns the namespace/name cache key for a ResolutionRequest.
func getRequestName(rr *v1beta1.ResolutionRequest) string {
	return rr.Namespace + "/" + rr.Name
}
// setClockOnReconciler is a ReconcilerModifier that installs the package's
// fake clock, unless a clock was already set by an earlier modifier.
func setClockOnReconciler(r *framework.Reconciler) {
	if r.Clock == nil {
		r.Clock = testClock
	}
}
// ensureConfigurationConfigMapsExist seeds d.ConfigMaps with empty
// feature-flags and resolver-cache configmaps when the test data doesn't
// already include them, so the configmap watcher has something to load.
func ensureConfigurationConfigMapsExist(d *test.Data) {
	const cacheConfigName = "resolver-cache-config"
	haveFeatureFlags, haveCacheConfig := false, false
	for _, cm := range d.ConfigMaps {
		switch cm.Name {
		case resolverconfig.GetFeatureFlagsConfigName():
			haveFeatureFlags = true
		case cacheConfigName:
			haveCacheConfig = true
		}
	}
	emptyConfigMap := func(name string) *corev1.ConfigMap {
		return &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
			},
			Data: map[string]string{},
		}
	}
	if !haveFeatureFlags {
		d.ConfigMaps = append(d.ConfigMaps, emptyConfigMap(resolverconfig.GetFeatureFlagsConfigName()))
	}
	if !haveCacheConfig {
		d.ConfigMaps = append(d.ConfigMaps, emptyConfigMap(cacheConfigName))
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
)
// ContextWithGitResolverDisabled returns a context containing a Config with the enable-git-resolver feature flag disabled.
func ContextWithGitResolverDisabled(ctx context.Context) context.Context {
	return contextWithResolverDisabled(ctx, "enable-git-resolver")
}

// ContextWithHubResolverDisabled returns a context containing a Config with the enable-hub-resolver feature flag disabled.
func ContextWithHubResolverDisabled(ctx context.Context) context.Context {
	return contextWithResolverDisabled(ctx, "enable-hub-resolver")
}

// ContextWithBundlesResolverDisabled returns a context containing a Config with the enable-bundles-resolver feature flag disabled.
func ContextWithBundlesResolverDisabled(ctx context.Context) context.Context {
	return contextWithResolverDisabled(ctx, "enable-bundles-resolver")
}

// ContextWithClusterResolverDisabled returns a context containing a Config with the enable-cluster-resolver feature flag disabled.
func ContextWithClusterResolverDisabled(ctx context.Context) context.Context {
	return contextWithResolverDisabled(ctx, "enable-cluster-resolver")
}

// ContextWithHttpResolverDisabled returns a context containing a Config with the enable-http-resolver feature flag disabled.
func ContextWithHttpResolverDisabled(ctx context.Context) context.Context {
	return contextWithResolverDisabled(ctx, "enable-http-resolver")
}
// contextWithResolverDisabled stores a resolver Config in the returned context
// with the named feature flag set to "false".
func contextWithResolverDisabled(ctx context.Context, resolverFlag string) context.Context {
	flags := map[string]string{resolverFlag: "false"}
	featureFlags, _ := resolverconfig.NewFeatureFlagsFromMap(flags)
	return resolverconfig.ToContext(ctx, &resolverconfig.Config{FeatureFlags: featureFlags})
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
// Configuration field names read from the git resolver's configmap. Keys may
// optionally carry a "<configKey>." prefix to define multiple named SCM
// configurations (see GetGitResolverConfig).
const (
	// DefaultTimeoutKey is the configuration field name for controlling
	// the maximum duration of a resolution request for a file from git.
	DefaultTimeoutKey = "fetch-timeout"
	// DefaultURLKey is the configuration field name for controlling
	// the git url to fetch the remote resource from.
	DefaultURLKey = "default-url"
	// DefaultRevisionKey is the configuration field name for controlling
	// the revision to fetch the remote resource from.
	DefaultRevisionKey = "default-revision"
	// DefaultOrgKey is the configuration field name for setting a default organization when using the SCM API.
	DefaultOrgKey = "default-org"
	// ServerURLKey is the config map key for the SCM provider URL
	ServerURLKey = "server-url"
	// SCMTypeKey is the config map key for the SCM provider type
	SCMTypeKey = "scm-type"
	// APISecretNameKey is the config map key for the token secret's name
	APISecretNameKey = "api-token-secret-name"
	// APISecretKeyKey is the config map key for the containing the token within the token secret
	APISecretKeyKey = "api-token-secret-key"
	// APISecretNamespaceKey is the config map key for the token secret's namespace
	APISecretNamespaceKey = "api-token-secret-namespace"
)
// GitResolverConfig maps a configuration identifier (the "<id>." prefix of a
// configmap key, or "default" for unprefixed keys) to its ScmConfig.
type GitResolverConfig map[string]ScmConfig

// ScmConfig groups the git resolver configmap values for one SCM provider.
// The json tags mirror the configmap field names.
type ScmConfig struct {
	Timeout string `json:"fetch-timeout"`
	URL string `json:"default-url"`
	Revision string `json:"default-revision"`
	Org string `json:"default-org"`
	ServerURL string `json:"server-url"`
	SCMType string `json:"scm-type"`
	GitToken string `json:"git-token"`
	APISecretName string `json:"api-token-secret-name"`
	APISecretKey string `json:"api-token-secret-key"`
	APISecretNamespace string `json:"api-token-secret-namespace"`
}
// GetGitResolverConfig reads the git resolver's configmap from the context and
// groups its entries into per-identifier ScmConfigs. A key of the form
// "<id>.<field>" populates the ScmConfig stored under "<id>"; a bare "<field>"
// key populates the "default" entry. Keys with more than one dot produce an
// error; keys whose field part matches no ScmConfig json tag still create an
// (empty) entry for their identifier but are otherwise ignored.
func GetGitResolverConfig(ctx context.Context) (GitResolverConfig, error) {
	// Build the json-tag -> field-name index once, instead of rescanning every
	// ScmConfig struct field for each configmap entry.
	structType := reflect.TypeOf(ScmConfig{})
	fieldNameByTag := make(map[string]string, structType.NumField())
	for i := 0; i < structType.NumField(); i++ {
		field := structType.Field(i)
		fieldNameByTag[field.Tag.Get("json")] = field.Name
	}
	gitResolverConfig := map[string]ScmConfig{}
	conf := framework.GetResolverConfigFromContext(ctx)
	for key, value := range conf {
		var configIdentifier, configKey string
		splittedKeyName := strings.Split(key, ".")
		switch len(splittedKeyName) {
		case 2:
			configKey = splittedKeyName[1]
			configIdentifier = splittedKeyName[0]
		case 1:
			configKey = key
			configIdentifier = "default"
		default:
			return nil, fmt.Errorf("key %s passed in git resolver configmap is invalid", key)
		}
		// Ensure an entry exists for this identifier even when the field name
		// is unrecognized, matching the previous reflection-scan behavior.
		if _, ok := gitResolverConfig[configIdentifier]; !ok {
			gitResolverConfig[configIdentifier] = ScmConfig{}
		}
		fieldName, known := fieldNameByTag[configKey]
		if !known {
			continue
		}
		tokenDetails := gitResolverConfig[configIdentifier]
		reflect.ValueOf(&tokenDetails).Elem().FieldByName(fieldName).SetString(value)
		gitResolverConfig[configIdentifier] = tokenDetails
	}
	return gitResolverConfig, nil
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
// cmdExecutor matches the signature of exec.CommandContext so tests can
// substitute a fake command runner.
type cmdExecutor = func(context.Context, string, ...string) *exec.Cmd

// remote describes a git remote to clone, with optional HTTP basic-auth
// credentials and an overridable command executor (used in tests).
type remote struct {
	url string
	username string
	password string
	cmdExecutor cmdExecutor
}
// clone shallow-clones the remote (without checking out a working tree) into a
// fresh temporary directory. It returns the repository handle and a cleanup
// function that removes the temporary directory; the cleanup function is
// callable on every return path, including errors.
func (r remote) clone(ctx context.Context) (*repository, func(), error) {
	// Name the temp dir after the last URL segment to ease debugging.
	urlParts := strings.Split(r.url, "/")
	repoName := urlParts[len(urlParts)-1]
	tmpDir, err := os.MkdirTemp("", repoName+"-*")
	if err != nil {
		return nil, func() {}, err
	}
	cleanupFunc := func() {
		os.RemoveAll(tmpDir)
	}
	repo := &repository{
		url: r.url,
		username: r.username,
		password: r.password,
		directory: tmpDir,
		executor: r.cmdExecutor,
	}
	// --depth=1 --no-checkout: fetch only the tip commit's metadata; the
	// desired revision is fetched and checked out later.
	_, err = repo.execGit(ctx, "clone", repo.url, tmpDir, "--depth=1", "--no-checkout")
	if err != nil {
		// Normalize git's credential-prompt failure into a clearer message.
		if strings.Contains(err.Error(), "could not read Username") {
			err = errors.New("clone error: authentication required")
		}
		return nil, cleanupFunc, err
	}
	return repo, cleanupFunc, nil
}
// repository is a local clone of a git remote, rooted at directory. All git
// commands run through executor (exec.CommandContext when unset).
type repository struct {
	url string
	username string
	password string
	directory string
	executor cmdExecutor
}
// currentRevision returns the commit sha that HEAD currently points at.
func (repo *repository) currentRevision(ctx context.Context) (string, error) {
	out, err := repo.execGit(ctx, "rev-list", "-n1", "HEAD")
	if err != nil {
		return "", err
	}
	sha := strings.TrimSpace(string(out))
	return sha, nil
}
// checkout shallow-fetches the given revision from origin and checks out the
// fetched commit (FETCH_HEAD).
func (repo *repository) checkout(ctx context.Context, revision string) error {
	if _, err := repo.execGit(ctx, "fetch", "origin", revision, "--depth=1"); err != nil {
		return err
	}
	_, err := repo.execGit(ctx, "checkout", "FETCH_HEAD")
	return err
}
// execGit runs `git <subCmd> <args...>` against the repository directory and
// returns the command's stdout. On failure the returned error includes the
// command's stderr (when available) for context.
func (repo *repository) execGit(ctx context.Context, subCmd string, args ...string) ([]byte, error) {
	// Lazily default to the real exec.CommandContext when no executor was injected.
	if repo.executor == nil {
		repo.executor = exec.CommandContext
	}
	args = append([]string{subCmd}, args...)
	// We need to configure which directory contains the cloned repository since `cd`ing
	// into the repository directory is not concurrency-safe
	configArgs := []string{"-C", repo.directory}
	// Never prompt for credentials; fail instead so the error can be surfaced.
	env := []string{"GIT_TERMINAL_PROMPT=false"}
	// NOTE: Since this is only HTTP basic auth, authentication is only supported for http
	// cloning, while unauthenticated cloning is supported for any other protocol supported
	// by git which doesn't require authentication.
	if repo.username != "" && repo.password != "" {
		token := base64.URLEncoding.EncodeToString([]byte(repo.username + ":" + repo.password))
		// Pass the Authorization header through the environment (consumed via
		// --config-env) so credentials never appear on the command line.
		env = append(
			env,
			"GIT_AUTH_HEADER=Authorization: Basic "+token,
		)
		configArgs = append(configArgs, "--config-env", "http.extraHeader=GIT_AUTH_HEADER")
	}
	cmd := repo.executor(ctx, "git", append(configArgs, args...)...)
	cmd.Env = append(cmd.Environ(), env...)
	out, err := cmd.Output()
	if err != nil {
		// Prefer stderr from the failed process over (usually empty) stdout.
		msg := string(out)
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			msg = string(exitErr.Stderr)
		}
		err = fmt.Errorf("git %s error: %s: %w", subCmd, strings.TrimSpace(msg), err)
	}
	return out, err
}
// getFileContent reads the file at path relative to the repository clone,
// guarding against use after the clone directory has been cleaned up.
func (repo *repository) getFileContent(path string) ([]byte, error) {
	if _, statErr := os.Stat(repo.directory); errors.Is(statErr, os.ErrNotExist) {
		return nil, fmt.Errorf("repository clone no longer exists, used after cleaned? %w", statErr)
	}
	contents, err := os.ReadFile(filepath.Join(repo.directory, path))
	switch {
	case errors.Is(err, os.ErrNotExist):
		return nil, errors.New("file does not exist")
	case err != nil:
		return nil, err
	default:
		return contents, nil
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"context"
"errors"
"fmt"
"os"
"regexp"
"strings"
"time"
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/go-scm/scm/factory"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"go.uber.org/zap"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/cache"
"k8s.io/client-go/kubernetes"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/logging"
)
const (
	// disabledError is returned when the enable-git-resolver feature flag is not true.
	disabledError = "cannot handle resolution request, enable-git-resolver feature flag not true"
	// labelValueGitResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	labelValueGitResolverType string = "git"
	// gitResolverName is the name that the git resolver should be
	// associated with
	gitResolverName string = "Git"
	// yamlContentType is the content type to use when returning yaml
	yamlContentType string = "application/x-yaml"
	// ConfigMapName is the git resolver's config map
	ConfigMapName = "git-resolver-config"
	// cacheSize is the size of the LRU secrets cache
	cacheSize = 1024
	// ttl is the time to live for a cache entry
	ttl = 5 * time.Minute
)
// Compile-time check that Resolver satisfies framework.Resolver.
var _ framework.Resolver = &Resolver{}

// Resolver implements a framework.Resolver that can fetch files from git.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/git.Resolver] instead.
type Resolver struct {
	kubeClient kubernetes.Interface // reads API-token secrets
	logger *zap.SugaredLogger
	cache *cache.LRUExpireCache // caches API tokens looked up from config
	ttl time.Duration // lifetime for cached entries
	// Used in testing
	clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)
}
// Initialize performs any setup required by the gitresolver.
func (r *Resolver) Initialize(ctx context.Context) error {
	r.logger = logging.FromContext(ctx)
	r.kubeClient = kubeclient.Get(ctx)
	r.ttl = ttl
	r.cache = cache.NewLRUExpireCache(cacheSize)
	if r.clientFunc == nil {
		// Default to the real SCM client factory; tests inject their own.
		r.clientFunc = factory.NewClient
	}
	return nil
}
// GetName returns the string name that the gitresolver should be
// associated with.
func (r *Resolver) GetName(_ context.Context) string {
	return gitResolverName
}
// GetSelector returns the labels that resource requests are required to have for
// the gitresolver to process them.
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
	selector := make(map[string]string, 1)
	selector[common.LabelKeyResolverType] = labelValueGitResolverType
	return selector
}
// ValidateParams returns an error if the given parameter map is not
// valid for a resource request targeting the gitresolver.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	// Delegates to the package-level ValidateParams helper.
	return ValidateParams(ctx, params)
}
// Resolve performs the work of fetching a file from git given a map of
// parameters.
func (r *Resolver) Resolve(ctx context.Context, origParams []pipelinev1.Param) (framework.ResolvedResource, error) {
	if IsDisabled(ctx) {
		return nil, errors.New(disabledError)
	}
	params, err := PopulateDefaultParams(ctx, origParams)
	if err != nil {
		return nil, err
	}
	g := &GitResolver{
		Params: params,
		Logger: r.logger,
		Cache: r.cache,
		TTL: r.ttl,
		KubeClient: r.kubeClient,
	}
	// An explicit url param selects a plain clone; otherwise use the SCM API.
	if params[UrlParam] == "" {
		return g.ResolveAPIGit(ctx, r.clientFunc)
	}
	return g.ResolveGitClone(ctx)
}
// ValidateParams checks that the request params are usable by the git
// resolver, returning an error when the resolver is disabled or the params
// fail default population.
func ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	if IsDisabled(ctx) {
		return errors.New(disabledError)
	}
	_, err := PopulateDefaultParams(ctx, params)
	return err
}
// repoURLRe matches the repository URL shapes the resolver accepts:
//   - an absolute local path, e.g. "/srv/git/repo"
//   - an scp-like ssh address, e.g. "git@github.com:org/repo.git"
//   - a git://, http:// or https:// scheme URL
//
// Compiled once at package scope instead of on every call.
var repoURLRe = regexp.MustCompile(`^(/|[^@]+@[^:]+|(git|https?)://)`)

// validateRepoURL validates if the given URL is a valid git, http, https URL,
// scp-like ssh address, or starts with a / (a local repository).
func validateRepoURL(url string) bool {
	return repoURLRe.MatchString(url)
}
// GitResolver carries the dependencies and request params used to resolve a
// git resource either by cloning or via the SCM provider API.
type GitResolver struct {
	KubeClient kubernetes.Interface
	Logger *zap.SugaredLogger
	Cache *cache.LRUExpireCache
	TTL time.Duration
	Params map[string]string
	// Function variables for mocking in tests
	ResolveGitCloneFunc func(ctx context.Context) (framework.ResolvedResource, error)
	ResolveAPIGitFunc func(ctx context.Context, clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)) (framework.ResolvedResource, error)
}
// ResolveGitClone resolves a git resource by shallow-cloning the repository
// and reading the requested file at the requested revision.
func (g *GitResolver) ResolveGitClone(ctx context.Context) (framework.ResolvedResource, error) {
	// Test seam: a mock implementation takes precedence when provided.
	if g.ResolveGitCloneFunc != nil {
		return g.ResolveGitCloneFunc(ctx)
	}
	conf, err := GetScmConfigForParamConfigKey(ctx, g.Params)
	if err != nil {
		return nil, err
	}
	repoURL := g.Params[UrlParam]
	if repoURL == "" {
		// NOTE(review): conf.URL is only consulted here to raise an error when
		// no default is configured; repoURL itself is not updated. Presumably
		// PopulateDefaultParams already copied the default into the params —
		// confirm, otherwise an empty URL could reach the clone below.
		urlString := conf.URL
		if urlString == "" {
			return nil, errors.New("default Git Repo Url was not set during installation of the git resolver")
		}
	}
	revision := g.Params[RevisionParam]
	if revision == "" {
		// NOTE(review): same pattern as repoURL above — revision stays empty here.
		revisionString := conf.Revision
		if revisionString == "" {
			return nil, errors.New("default Git Revision was not set during installation of the git resolver")
		}
	}
	var username string
	var password string
	// Build the token-secret reference from request params; nil means no auth.
	secretRef := &secretCacheKey{
		name: g.Params[GitTokenParam],
		key: g.Params[GitTokenKeyParam],
	}
	if secretRef.name != "" {
		if secretRef.key == "" {
			secretRef.key = DefaultTokenKeyParam
		}
		secretRef.ns = common.RequestNamespace(ctx)
	} else {
		secretRef = nil
	}
	if secretRef != nil {
		gitToken, err := g.getAPIToken(ctx, secretRef, GitTokenKeyParam)
		if err != nil {
			return nil, err
		}
		// HTTP basic auth with the token as password; "git" is the username.
		username = "git"
		password = string(gitToken)
	}
	path := g.Params[PathParam]
	repo, cleanupFunc, err := remote{url: repoURL, username: username, password: password}.clone(ctx)
	// clone always returns a callable cleanup func, so defer before the err check.
	defer cleanupFunc()
	if err != nil {
		return nil, fmt.Errorf("error resolving repository: %w", err)
	}
	err = repo.checkout(ctx, revision)
	if err != nil {
		return nil, err
	}
	// Record the full commit sha so the annotations carry an exact revision.
	fullRevision, err := repo.currentRevision(ctx)
	if err != nil {
		return nil, err
	}
	fileContents, err := repo.getFileContent(path)
	if err != nil {
		return nil, fmt.Errorf("error opening file %q: %w", path, err)
	}
	return &resolvedGitResource{
		Revision: fullRevision,
		Content: fileContents,
		URL: repo.url,
		Path: path,
	}, nil
}
// ResolveAPIGit resolves a git resource using the SCM API.
func (g *GitResolver) ResolveAPIGit(ctx context.Context, clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)) (framework.ResolvedResource, error) {
	// Test seam: a mock implementation takes precedence when provided.
	if g.ResolveAPIGitFunc != nil {
		return g.ResolveAPIGitFunc(ctx, clientFunc)
	}
	// If we got here, the "repo" param was specified, so use the API approach
	scmType, serverURL, err := getSCMTypeAndServerURL(ctx, g.Params)
	if err != nil {
		return nil, err
	}
	// Build the token-secret reference from request params; nil tells
	// getAPIToken to fall back to (and cache) the configured secret.
	secretRef := &secretCacheKey{
		name: g.Params[TokenParam],
		key: g.Params[TokenKeyParam],
	}
	if secretRef.name != "" {
		if secretRef.key == "" {
			secretRef.key = DefaultTokenKeyParam
		}
		secretRef.ns = common.RequestNamespace(ctx)
	} else {
		secretRef = nil
	}
	apiToken, err := g.getAPIToken(ctx, secretRef, APISecretNameKey)
	if err != nil {
		return nil, err
	}
	scmClient, err := clientFunc(scmType, serverURL, string(apiToken))
	if err != nil {
		return nil, fmt.Errorf("failed to create SCM client: %w", err)
	}
	orgRepo := fmt.Sprintf("%s/%s", g.Params[OrgParam], g.Params[RepoParam])
	path := g.Params[PathParam]
	ref := g.Params[RevisionParam]
	// fetch the actual content from a file in the repo
	content, _, err := scmClient.Contents.Find(ctx, orgRepo, path, ref)
	if err != nil {
		return nil, fmt.Errorf("couldn't fetch resource content: %w", err)
	}
	if content == nil || len(content.Data) == 0 {
		return nil, fmt.Errorf("no content for resource in %s %s", orgRepo, path)
	}
	// find the actual git commit sha by the ref
	commit, _, err := scmClient.Git.FindCommit(ctx, orgRepo, ref)
	if err != nil || commit == nil {
		return nil, fmt.Errorf("couldn't fetch the commit sha for the ref %s in the repo: %w", ref, err)
	}
	// fetch the repository URL
	repo, _, err := scmClient.Repositories.Find(ctx, orgRepo)
	if err != nil {
		return nil, fmt.Errorf("couldn't fetch repository: %w", err)
	}
	return &resolvedGitResource{
		Content: content.Data,
		Revision: commit.Sha,
		Org: g.Params[OrgParam],
		Repo: g.Params[RepoParam],
		Path: content.Path,
		URL: repo.Clone,
	}, nil
}
// Compile-time check that Resolver implements framework.ConfigWatcher.
var _ framework.ConfigWatcher = &Resolver{}

// GetConfigName returns the name of the git resolver's configmap.
func (r *Resolver) GetConfigName(context.Context) string {
	return ConfigMapName
}
// Compile-time check that Resolver implements framework.TimedResolution.
var _ framework.TimedResolution = &Resolver{}

// GetResolutionTimeout returns a time.Duration for the amount of time a
// single git fetch may take. This can be configured with the
// fetch-timeout field in the git-resolver-config configmap.
func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
	conf, err := GetScmConfigForParamConfigKey(ctx, params)
	if err != nil {
		return 0, err
	}
	timeoutString := conf.Timeout
	if timeoutString == "" {
		return defaultTimeout, nil
	}
	// ParseDuration returns (0, err) on failure, matching the previous behavior.
	return time.ParseDuration(timeoutString)
}
// PopulateDefaultParams converts the request params to a map and fills in
// defaults (revision, url, org) from the resolver configuration. It errors
// when required params are missing, when both url and repo are given, or when
// a plain-clone url fails validation.
func PopulateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
	paramsMap := make(map[string]string)
	for _, p := range params {
		paramsMap[p.Name] = p.Value.StringVal
	}
	conf, err := GetScmConfigForParamConfigKey(ctx, paramsMap)
	if err != nil {
		return nil, err
	}
	var missingParams []string
	// Revision: fall back to the configured default when not supplied.
	if _, ok := paramsMap[RevisionParam]; !ok {
		defaultRevision := conf.Revision
		if defaultRevision != "" {
			paramsMap[RevisionParam] = defaultRevision
		} else {
			missingParams = append(missingParams, RevisionParam)
		}
	}
	if _, ok := paramsMap[PathParam]; !ok {
		missingParams = append(missingParams, PathParam)
	}
	// url selects the clone approach, repo the SCM API approach — exactly one
	// of the two must end up set.
	if paramsMap[UrlParam] != "" && paramsMap[RepoParam] != "" {
		return nil, fmt.Errorf("cannot specify both '%s' and '%s'", UrlParam, RepoParam)
	}
	if paramsMap[UrlParam] == "" && paramsMap[RepoParam] == "" {
		urlString := conf.URL
		if urlString != "" {
			paramsMap[UrlParam] = urlString
		} else {
			return nil, fmt.Errorf("must specify one of '%s' or '%s'", UrlParam, RepoParam)
		}
	}
	// The SCM API approach needs an org, either as a param or from config.
	if paramsMap[RepoParam] != "" {
		if _, ok := paramsMap[OrgParam]; !ok {
			defaultOrg := conf.Org
			if defaultOrg != "" {
				paramsMap[OrgParam] = defaultOrg
			} else {
				return nil, fmt.Errorf("'%s' is required when '%s' is specified", OrgParam, RepoParam)
			}
		}
	}
	if len(missingParams) > 0 {
		return nil, fmt.Errorf("missing required git resolver params: %s", strings.Join(missingParams, ", "))
	}
	// validate the url params if we are not using the SCM API
	if paramsMap[RepoParam] == "" && paramsMap[OrgParam] == "" && !validateRepoURL(paramsMap[UrlParam]) {
		return nil, fmt.Errorf("invalid git repository url: %s", paramsMap[UrlParam])
	}
	// TODO(sbwsg): validate pathInRepo is valid relative pathInRepo
	return paramsMap, nil
}
// spdxVCSPrefix marks a URI as a git VCS location in the SPDX format,
// which is recommended by in-toto.
// ref: https://spdx.dev/spdx-specification-21-web-version/#h.49x2ik5
// ref: https://github.com/in-toto/attestation/blob/main/spec/field_types.md
const spdxVCSPrefix = "git+"

// spdxGit returns url in the SPDX git VCS form.
func spdxGit(url string) string {
	return spdxVCSPrefix + url
}
// resolvedGitResource implements framework.ResolvedResource and returns
// the resolved file []byte data and an annotation map for any metadata.
type resolvedGitResource struct {
	Revision string // full commit sha the content was read at
	Content []byte // raw file bytes
	Org string // set only when resolved via the SCM API
	Repo string // set only when resolved via the SCM API
	Path string // path of the file within the repository
	URL string // clone URL of the repository
}
// Compile-time check that resolvedGitResource satisfies framework.ResolvedResource.
var _ framework.ResolvedResource = &resolvedGitResource{}

// Data returns the bytes of the file resolved from git.
func (r *resolvedGitResource) Data() []byte {
	return r.Content
}
// Annotations returns the metadata that accompanies the file fetched
// from git. Org and repo annotations are present only when the SCM API
// populated them.
func (r *resolvedGitResource) Annotations() map[string]string {
	annotations := make(map[string]string, 6)
	annotations[AnnotationKeyRevision] = r.Revision
	annotations[AnnotationKeyPath] = r.Path
	annotations[AnnotationKeyURL] = r.URL
	annotations[common.AnnotationKeyContentType] = yamlContentType
	if r.Org != "" {
		annotations[AnnotationKeyOrg] = r.Org
	}
	if r.Repo != "" {
		annotations[AnnotationKeyRepo] = r.Repo
	}
	return annotations
}
// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint.
func (r *resolvedGitResource) RefSource() *pipelinev1.RefSource {
	digest := map[string]string{"sha1": r.Revision}
	return &pipelinev1.RefSource{
		URI: spdxGit(r.URL),
		Digest: digest,
		EntryPoint: r.Path,
	}
}
// secretCacheKey identifies a Kubernetes secret entry (namespace/name/key)
// holding an SCM API token; it also serves as the token cache key.
type secretCacheKey struct {
	ns string
	name string
	key string
}
// getAPIToken fetches the SCM API token from the secret identified by
// apiSecret, filling unset fields from the resolver configuration (and, for
// the namespace, the SYSTEM_NAMESPACE env var). When apiSecret is nil — i.e.
// the user supplied no secret params — the configured secret is used and the
// token is looked up in, and added to, the resolver's LRU cache. key names
// the config field reported when no secret name is configured.
func (g *GitResolver) getAPIToken(ctx context.Context, apiSecret *secretCacheKey, key string) ([]byte, error) {
	conf, err := GetScmConfigForParamConfigKey(ctx, g.Params)
	if err != nil {
		return nil, err
	}
	// NOTE(chmouel): only cache secrets when user hasn't passed params in their resolver configuration
	cacheSecret := false
	if apiSecret == nil {
		cacheSecret = true
		apiSecret = &secretCacheKey{}
	}
	if apiSecret.name == "" {
		apiSecret.name = conf.APISecretName
		if apiSecret.name == "" {
			err := fmt.Errorf("cannot get API token, required when specifying '%s' param, '%s' not specified in config", RepoParam, key)
			g.Logger.Info(err)
			return nil, err
		}
	}
	if apiSecret.key == "" {
		apiSecret.key = conf.APISecretKey
		if apiSecret.key == "" {
			err := fmt.Errorf("cannot get API token, required when specifying '%s' param, '%s' not specified in config", RepoParam, APISecretKeyKey)
			g.Logger.Info(err)
			return nil, err
		}
	}
	if apiSecret.ns == "" {
		apiSecret.ns = conf.APISecretNamespace
		if apiSecret.ns == "" {
			apiSecret.ns = os.Getenv("SYSTEM_NAMESPACE")
		}
	}
	// Cache by value, not by pointer: apiSecret is freshly allocated on every
	// call, so a pointer key would never match an entry added by an earlier
	// call and the cache would always miss.
	cacheKey := *apiSecret
	if cacheSecret {
		if val, ok := g.Cache.Get(cacheKey); ok {
			return val.([]byte), nil
		}
	}
	secret, err := g.KubeClient.CoreV1().Secrets(apiSecret.ns).Get(ctx, apiSecret.name, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			notFoundErr := fmt.Errorf("cannot get API token, secret %s not found in namespace %s", apiSecret.name, apiSecret.ns)
			g.Logger.Info(notFoundErr)
			return nil, notFoundErr
		}
		wrappedErr := fmt.Errorf("error reading API token from secret %s in namespace %s: %w", apiSecret.name, apiSecret.ns, err)
		g.Logger.Info(wrappedErr)
		return nil, wrappedErr
	}
	secretVal, ok := secret.Data[apiSecret.key]
	if !ok {
		err := fmt.Errorf("cannot get API token, key %s not found in secret %s in namespace %s", apiSecret.key, apiSecret.name, apiSecret.ns)
		g.Logger.Info(err)
		return nil, err
	}
	if cacheSecret {
		g.Cache.Add(cacheKey, secretVal, ttl)
	}
	return secretVal, nil
}
// getSCMTypeAndServerURL returns the SCM provider type and server URL,
// preferring request params over the resolver configuration.
func getSCMTypeAndServerURL(ctx context.Context, params map[string]string) (string, string, error) {
	conf, err := GetScmConfigForParamConfigKey(ctx, params)
	if err != nil {
		return "", "", err
	}
	scmType := params[ScmTypeParam]
	if scmType == "" {
		scmType = conf.SCMType
	}
	serverURL := params[ServerURLParam]
	if serverURL == "" {
		serverURL = conf.ServerURL
	}
	return scmType, serverURL, nil
}
// IsDisabled reports whether the enable-git-resolver feature flag is not true.
func IsDisabled(ctx context.Context) bool {
	return !resolverconfig.FromContextOrDefaults(ctx).FeatureFlags.EnableGitResolver
}
// GetScmConfigForParamConfigKey returns the ScmConfig selected by the
// request's configKey param, or the "default" configuration when the param is
// absent. An unknown configKey is an error.
func GetScmConfigForParamConfigKey(ctx context.Context, params map[string]string) (ScmConfig, error) {
	gitResolverConfig, err := GetGitResolverConfig(ctx)
	if err != nil {
		return ScmConfig{}, err
	}
	configKeyToUse, ok := params[ConfigKeyParam]
	if !ok {
		return gitResolverConfig["default"], nil
	}
	if config, exist := gitResolverConfig[configKeyToUse]; exist {
		return config, nil
	}
	return ScmConfig{}, fmt.Errorf("no git resolver configuration found for configKey %s", configKeyToUse)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package http
import (
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"go.uber.org/zap"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/logging"
)
const (
	// LabelValueHttpResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueHttpResolverType string = "http"
	// disabledError is returned when the enable-http-resolver feature flag is not true.
	disabledError = "cannot handle resolution request, enable-http-resolver feature flag not true"
	// httpResolverName is the name of the resolver
	httpResolverName = "Http"
	// configMapName is the http resolver's config map
	configMapName = "http-resolver-config"
	// defaultHttpTimeoutValue is the default timeout when fetching http resources
	defaultHttpTimeoutValue = "1m"
	// defaultBasicAuthSecretKey is the default key in the HTTP password secret
	defaultBasicAuthSecretKey = "password"
)
// Resolver implements a framework.Resolver that can fetch files from an HTTP URL
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/http.Resolver] instead.
type Resolver struct {
	kubeClient kubernetes.Interface // reads basic-auth password secrets
	logger *zap.SugaredLogger
}
// Initialize sets up the http resolver's Kubernetes client and logger from
// the injected context.
func (r *Resolver) Initialize(ctx context.Context) error {
	r.logger = logging.FromContext(ctx)
	r.kubeClient = kubeclient.Get(ctx)
	return nil
}
// GetName returns a string name to refer to this resolver by.
func (r *Resolver) GetName(context.Context) string {
	return httpResolverName
}
// GetConfigName returns the name of the http resolver's configmap.
func (r *Resolver) GetConfigName(context.Context) string {
	return configMapName
}
// GetSelector returns a map of labels to match requests to this resolver.
func (r *Resolver) GetSelector(context.Context) map[string]string {
	selector := make(map[string]string, 1)
	selector[common.LabelKeyResolverType] = LabelValueHttpResolverType
	return selector
}
// ValidateParams ensures parameters from a request are as expected.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	// Delegates to the package-level ValidateParams helper.
	return ValidateParams(ctx, params)
}
// Resolve uses the given params to resolve the requested file or resource.
func (r *Resolver) Resolve(ctx context.Context, oParams []pipelinev1.Param) (framework.ResolvedResource, error) {
	if IsDisabled(ctx) {
		return nil, errors.New(disabledError)
	}
	params, populateErr := PopulateDefaultParams(ctx, oParams)
	if populateErr != nil {
		return nil, populateErr
	}
	return FetchHttpResource(ctx, params, r.kubeClient, r.logger)
}
// IsDisabled reports whether the enable-http-resolver feature flag is not true.
func IsDisabled(ctx context.Context) bool {
	return !resolverconfig.FromContextOrDefaults(ctx).FeatureFlags.EnableHttpResolver
}
// resolvedHttpResource wraps the data we want to return to Pipelines
type resolvedHttpResource struct {
	URL string // the URL the content was fetched from
	Content []byte // the fetched response body
}
// Compile-time check that resolvedHttpResource satisfies framework.ResolvedResource.
var _ framework.ResolvedResource = &resolvedHttpResource{}

// Data returns the bytes fetched from the HTTP URL.
func (rr *resolvedHttpResource) Data() []byte {
	return rr.Content
}
// Annotations returns any metadata needed alongside the data. The http
// resolver currently attaches none.
func (*resolvedHttpResource) Annotations() map[string]string {
	return nil
}
// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint.
func (rr *resolvedHttpResource) RefSource() *pipelinev1.RefSource {
	// sha256.Sum256 is equivalent to the New/Write/Sum sequence for a single blob.
	sum := sha256.Sum256(rr.Content)
	return &pipelinev1.RefSource{
		URI: rr.URL,
		Digest: map[string]string{
			"sha256": hex.EncodeToString(sum[:]),
		},
	}
}
// PopulateDefaultParams converts the request params to a map and validates
// them: the url param must be present and a parseable http(s) URL, and the
// basic-auth username/secret params must be supplied together and non-empty.
func PopulateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
	paramsMap := make(map[string]string)
	for _, p := range params {
		paramsMap[p.Name] = p.Value.StringVal
	}
	var missingParams []string
	if _, ok := paramsMap[UrlParam]; !ok {
		missingParams = append(missingParams, UrlParam)
	} else {
		u, err := url.ParseRequestURI(paramsMap[UrlParam])
		if err != nil {
			return nil, fmt.Errorf("cannot parse url %s: %w", paramsMap[UrlParam], err)
		}
		// Only http/https are fetchable by this resolver.
		if u.Scheme != "http" && u.Scheme != "https" {
			return nil, fmt.Errorf("url %s is not a valid http(s) url", paramsMap[UrlParam])
		}
	}
	// Basic-auth params come in pairs: each one requires the other.
	if username, ok := paramsMap[HttpBasicAuthUsername]; ok {
		if _, ok := paramsMap[HttpBasicAuthSecret]; !ok {
			return nil, fmt.Errorf("missing required param %s when using %s", HttpBasicAuthSecret, HttpBasicAuthUsername)
		}
		if username == "" {
			return nil, fmt.Errorf("value %s cannot be empty", HttpBasicAuthUsername)
		}
	}
	if secret, ok := paramsMap[HttpBasicAuthSecret]; ok {
		if _, ok := paramsMap[HttpBasicAuthUsername]; !ok {
			return nil, fmt.Errorf("missing required param %s when using %s", HttpBasicAuthUsername, HttpBasicAuthSecret)
		}
		if secret == "" {
			return nil, fmt.Errorf("value %s cannot be empty", HttpBasicAuthSecret)
		}
	}
	if len(missingParams) > 0 {
		return nil, fmt.Errorf("missing required http resolver params: %s", strings.Join(missingParams, ", "))
	}
	return paramsMap, nil
}
// makeHttpClient builds an HTTP client whose timeout comes from the resolver
// configmap's timeout field, falling back to defaultHttpTimeoutValue.
func makeHttpClient(ctx context.Context) (*http.Client, error) {
	conf := framework.GetResolverConfigFromContext(ctx)
	timeout, _ := time.ParseDuration(defaultHttpTimeoutValue)
	if v, ok := conf[TimeoutKey]; ok {
		parsed, err := time.ParseDuration(v)
		if err != nil {
			return nil, fmt.Errorf("error parsing timeout value %s: %w", v, err)
		}
		timeout = parsed
	}
	return &http.Client{Timeout: timeout}, nil
}
// FetchHttpResource fetches the resource at params[UrlParam] over HTTP(S),
// attaching a basic-auth Authorization header when the corresponding params
// are set, and wraps the response body as a framework.ResolvedResource.
func FetchHttpResource(ctx context.Context, params map[string]string, kubeclient kubernetes.Interface, logger *zap.SugaredLogger) (framework.ResolvedResource, error) {
	httpClient, err := makeHttpClient(ctx)
	if err != nil {
		return nil, err
	}
	targetURL, ok := params[UrlParam]
	if !ok {
		return nil, fmt.Errorf("missing required params: %s", UrlParam)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
	if err != nil {
		return nil, fmt.Errorf("constructing request: %w", err)
	}
	// NOTE(chmouel): We already made sure that username and secret was specified by the user
	if secret, ok := params[HttpBasicAuthSecret]; ok && secret != "" {
		encodedSecret, err := getBasicAuthSecret(ctx, params, kubeclient, logger)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Authorization", encodedSecret)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("error fetching URL: %w", err)
	}
	// Close the body on every path. Previously the non-200 return below ran
	// before the defer was registered, leaking the response body/connection.
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("requested URL '%s' is not found", targetURL)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading response body: %w", err)
	}
	return &resolvedHttpResource{
		Content: body,
		URL: targetURL,
	}, nil
}
// getBasicAuthSecret reads the basic-auth password from the secret named by
// the params (in the requesting namespace, under the configured key or
// "password" by default) and returns a ready-to-use "Basic <base64>"
// Authorization header value.
func getBasicAuthSecret(ctx context.Context, params map[string]string, kubeclient kubernetes.Interface, logger *zap.SugaredLogger) (string, error) {
	secretName := params[HttpBasicAuthSecret]
	userName := params[HttpBasicAuthUsername]
	tokenSecretKey := defaultBasicAuthSecretKey
	if v, ok := params[HttpBasicAuthSecretKey]; ok {
		if v != "" {
			tokenSecretKey = v
		}
	}
	// The secret is looked up in the namespace of the resolution request.
	secretNS := common.RequestNamespace(ctx)
	secret, err := kubeclient.CoreV1().Secrets(secretNS).Get(ctx, secretName, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			notFoundErr := fmt.Errorf("cannot get API token, secret %s not found in namespace %s", secretName, secretNS)
			logger.Info(notFoundErr)
			return "", notFoundErr
		}
		wrappedErr := fmt.Errorf("error reading API token from secret %s in namespace %s: %w", secretName, secretNS, err)
		logger.Info(wrappedErr)
		return "", wrappedErr
	}
	secretVal, ok := secret.Data[tokenSecretKey]
	if !ok {
		err := fmt.Errorf("cannot get API token, key %s not found in secret %s in namespace %s", tokenSecretKey, secretName, secretNS)
		logger.Info(err)
		return "", err
	}
	return "Basic " + base64.StdEncoding.EncodeToString(
		[]byte(fmt.Sprintf("%s:%s", userName, secretVal))), nil
}
// ValidateParams checks that the resolver is enabled and that the given
// params can be populated with their defaults without error.
func ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	if IsDisabled(ctx) {
		return errors.New(disabledError)
	}
	// The populated map is not needed here; only the validation side matters.
	_, err := PopulateDefaultParams(ctx, params)
	return err
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hub
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"slices"
"strings"
goversion "github.com/hashicorp/go-version"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
const (
	// LabelValueHubResolverType is the value to use for the
	// resolution.tekton.dev/type label on resource requests
	LabelValueHubResolverType string = "hub"
	// ArtifactHubType is the value to use setting the type field to artifact
	ArtifactHubType string = "artifact"
	// TektonHubType is the value to use setting the type field to tekton
	TektonHubType string = "tekton"
	// disabledError is returned whenever the enable-hub-resolver feature
	// flag is not true.
	disabledError = "cannot handle resolution request, enable-hub-resolver feature flag not true"
)

// supportedKinds lists the resource kinds the hub resolver accepts in the
// "kind" param (checked by isSupportedKind).
var supportedKinds = []string{"task", "pipeline", "stepaction"}

// Resolver implements a framework.Resolver that fetches resources from the
// Artifact Hub or Tekton Hub APIs.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/hub.Resolver] instead.
type Resolver struct {
	// TektonHubURL is the URL for hub resolver with type tekton
	TektonHubURL string
	// ArtifactHubURL is the URL for hub resolver with type artifact
	ArtifactHubURL string
}
// Initialize sets up any dependencies needed by the resolver. None atm.
func (r *Resolver) Initialize(context.Context) error {
	return nil
}

// GetName returns a string name to refer to this resolver by.
func (r *Resolver) GetName(context.Context) string {
	return "Hub"
}

// GetConfigName returns the name of the hub resolver's configmap.
func (r *Resolver) GetConfigName(context.Context) string {
	return "hubresolver-config"
}

// GetSelector returns a map of labels to match requests to this resolver.
func (r *Resolver) GetSelector(context.Context) map[string]string {
	return map[string]string{
		common.LabelKeyResolverType: LabelValueHubResolverType,
	}
}

// ValidateParams ensures parameters from a request are as expected;
// it delegates to the package-level ValidateParams with this resolver's
// configured Tekton Hub URL.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
	return ValidateParams(ctx, params, r.TektonHubURL)
}
// ValidateParams checks that the hub resolver is enabled and that the given
// params, once defaults are filled in, form a valid hub resolution request.
func ValidateParams(ctx context.Context, params []pipelinev1.Param, tektonHubUrl string) error {
	if isDisabled(ctx) {
		return errors.New(disabledError)
	}
	defaulted, err := populateDefaultParams(ctx, params)
	if err != nil {
		return fmt.Errorf("failed to populate default params: %w", err)
	}
	if err = validateParams(ctx, defaulted, tektonHubUrl); err != nil {
		return fmt.Errorf("failed to validate params: %w", err)
	}
	return nil
}
// tektonHubDataResponse is the inner payload of a Tekton Hub resource
// response; YAML carries the raw manifest.
type tektonHubDataResponse struct {
	YAML string `json:"yaml"`
}

// tektonHubResponse is the envelope returned by the Tekton Hub yaml endpoint.
type tektonHubResponse struct {
	Data tektonHubDataResponse `json:"data"`
}

// artifactHubDataResponse is the inner payload of an Artifact Hub package
// response; the raw manifest arrives under "manifestRaw".
type artifactHubDataResponse struct {
	YAML string `json:"manifestRaw"`
}

// artifactHubResponse is the envelope returned by the Artifact Hub endpoint.
type artifactHubResponse struct {
	Data artifactHubDataResponse `json:"data"`
}
// Resolve uses the given params to resolve the requested file or resource.
func (r *Resolver) Resolve(ctx context.Context, params []pipelinev1.Param) (framework.ResolvedResource, error) {
	return Resolve(ctx, params, r.TektonHubURL, r.ArtifactHubURL)
}

// Resolve validates the request params, normalizes the requested version for
// the target hub, fetches the manifest from the Artifact Hub or Tekton Hub
// API, and returns it wrapped as a ResolvedHubResource.
func Resolve(ctx context.Context, params []pipelinev1.Param, tektonHubURL, artifactHubURL string) (framework.ResolvedResource, error) {
	if isDisabled(ctx) {
		return nil, errors.New(disabledError)
	}
	paramsMap, err := populateDefaultParams(ctx, params)
	if err != nil {
		return nil, fmt.Errorf("failed to populate default params: %w", err)
	}
	if err := validateParams(ctx, paramsMap, tektonHubURL); err != nil {
		return nil, fmt.Errorf("failed to validate params: %w", err)
	}
	// When the version param parses as a version constraint, resolve it to a
	// concrete version against the hub's version list.
	// NOTE(review): plain versions may also parse as constraints, in which
	// case this path runs for exact versions too — confirm against go-version.
	if constraint, err := goversion.NewConstraint(paramsMap[ParamVersion]); err == nil {
		chosen, err := resolveVersionConstraint(ctx, paramsMap, constraint, artifactHubURL, tektonHubURL)
		if err != nil {
			return nil, err
		}
		paramsMap[ParamVersion] = chosen.String()
	}
	// Normalize the version format to what the chosen hub type expects
	// (see resolveVersion: Artifact Hub wants x.y.z, Tekton Hub wants x.y).
	resVer, err := resolveVersion(paramsMap[ParamVersion], paramsMap[ParamType])
	if err != nil {
		return nil, err
	}
	paramsMap[ParamVersion] = resVer
	// call hub API
	switch paramsMap[ParamType] {
	case ArtifactHubType:
		// The endpoint constant carries the format verbs; the outer Sprintf
		// only joins the base URL with the endpoint template.
		url := fmt.Sprintf(fmt.Sprintf("%s/%s", artifactHubURL, ArtifactHubYamlEndpoint),
			paramsMap[ParamKind], paramsMap[ParamCatalog], paramsMap[ParamName], paramsMap[ParamVersion])
		resp := artifactHubResponse{}
		if err := fetchHubResource(ctx, url, &resp); err != nil {
			return nil, fmt.Errorf("fail to fetch Artifact Hub resource: %w", err)
		}
		return &ResolvedHubResource{
			URL:     url,
			Content: []byte(resp.Data.YAML),
		}, nil
	case TektonHubType:
		// Note: the Tekton Hub endpoint orders catalog before kind, unlike
		// the Artifact Hub endpoint above.
		url := fmt.Sprintf(fmt.Sprintf("%s/%s", tektonHubURL, TektonHubYamlEndpoint),
			paramsMap[ParamCatalog], paramsMap[ParamKind], paramsMap[ParamName], paramsMap[ParamVersion])
		resp := tektonHubResponse{}
		if err := fetchHubResource(ctx, url, &resp); err != nil {
			return nil, fmt.Errorf("fail to fetch Tekton Hub resource: %w", err)
		}
		return &ResolvedHubResource{
			URL:     url,
			Content: []byte(resp.Data.YAML),
		}, nil
	}
	return nil, fmt.Errorf("hub resolver type: %s is not supported", paramsMap[ParamType])
}
// ResolvedHubResource wraps the data we want to return to Pipelines
type ResolvedHubResource struct {
	// URL is the hub API endpoint the content was fetched from.
	URL string
	// Content is the raw YAML manifest returned by the hub.
	Content []byte
}

// Compile-time check that ResolvedHubResource satisfies the framework interface.
var _ framework.ResolvedResource = &ResolvedHubResource{}

// Data returns the bytes of the resource fetched from the hub.
func (rr *ResolvedHubResource) Data() []byte {
	return rr.Content
}

// Annotations returns any metadata needed alongside the data. None atm.
func (*ResolvedHubResource) Annotations() map[string]string {
	return nil
}
// RefSource is the source reference of the remote data that records where the remote
// file came from including the url, digest and the entrypoint.
func (rr *ResolvedHubResource) RefSource() *pipelinev1.RefSource {
	// sha256.Sum256 hashes the content in one call, replacing the manual
	// hash.New/Write/Sum sequence; the digest value is unchanged.
	sum := sha256.Sum256(rr.Content)
	return &pipelinev1.RefSource{
		URI: rr.URL,
		Digest: map[string]string{
			"sha256": hex.EncodeToString(sum[:]),
		},
	}
}
// isDisabled reports whether the enable-hub-resolver feature flag is off for
// this request's context.
func isDisabled(ctx context.Context) bool {
	cfg := resolverconfig.FromContextOrDefaults(ctx)
	return !cfg.FeatureFlags.EnableHubResolver
}
func fetchHubResource(ctx context.Context, apiEndpoint string, v interface{}) error {
// #nosec G107 -- URL cannot be constant in this case.
req, err := http.NewRequestWithContext(ctx, http.MethodGet, apiEndpoint, nil)
if err != nil {
return fmt.Errorf("constructing request: %w", err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("requesting resource from Hub: %w", err)
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("requested resource '%s' not found on hub", apiEndpoint)
}
defer func() {
_ = resp.Body.Close()
}()
body, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("error reading response body: %w", err)
}
err = json.Unmarshal(body, v)
if err != nil {
return fmt.Errorf("error unmarshalling json response: %w", err)
}
return nil
}
// resolveCatalogName returns the catalog to query: the explicit "catalog"
// param when present, otherwise the configured default for the request's hub
// type (and, for the Artifact Hub, the resource kind).
func resolveCatalogName(paramsMap, conf map[string]string) (string, error) {
	thCatalog, ok := conf[ConfigTektonHubCatalog]
	if !ok {
		return "", errors.New("default Tekton Hub catalog was not set during installation of the hub resolver")
	}
	ahTaskCatalog, ok := conf[ConfigArtifactHubTaskCatalog]
	if !ok {
		return "", errors.New("default Artifact Hub task catalog was not set during installation of the hub resolver")
	}
	ahPipelineCatalog, ok := conf[ConfigArtifactHubPipelineCatalog]
	if !ok {
		return "", errors.New("default Artifact Hub pipeline catalog was not set during installation of the hub resolver")
	}
	// An explicit catalog param wins over any configured default.
	if catalog, ok := paramsMap[ParamCatalog]; ok {
		return catalog, nil
	}
	switch paramsMap[ParamType] {
	case ArtifactHubType:
		switch paramsMap[ParamKind] {
		case "task":
			return ahTaskCatalog, nil
		case "pipeline":
			return ahPipelineCatalog, nil
		default:
			return "", fmt.Errorf("failed to resolve catalog name with kind: %s", paramsMap[ParamKind])
		}
	case TektonHubType:
		return thCatalog, nil
	default:
		return "", fmt.Errorf("failed to resolve catalog name with type: %s", paramsMap[ParamType])
	}
}
// artifactHubavailableVersionsResults is one entry of an Artifact Hub
// package's available_versions list.
type artifactHubavailableVersionsResults struct {
	Version    string `json:"version"`
	Prerelease bool   `json:"prerelease"`
}

// artifactHubListResult is the Artifact Hub package response used when
// listing versions for constraint resolution.
type artifactHubListResult struct {
	AvailableVersions []artifactHubavailableVersionsResults `json:"available_versions"`
	Version           string                                `json:"version"`
}

// tektonHubListResultVersion is one version entry from a Tekton Hub listing.
type tektonHubListResultVersion struct {
	Version string `json:"version"`
}

// tektonHubListDataResult is the data section of a Tekton Hub listing.
type tektonHubListDataResult struct {
	Versions []tektonHubListResultVersion `json:"versions"`
}

// tektonHubListResult is the envelope of the Tekton Hub listing response.
type tektonHubListResult struct {
	Data tektonHubListDataResult `json:"data"`
}
// resolveVersion adapts the version string to the target hub's format:
// the Artifact Hub uses full semVer (<major>.<minor>.<patch>) while the
// Tekton Hub uses simplified semVer (<major>.<minor>). A ".0" patch is
// appended for "artifact" requests given a two-part version, and "tekton"
// requests keep only the first two parts of a longer version.
func resolveVersion(version, hubType string) (string, error) {
	parts := strings.Split(version, ".")
	switch {
	case hubType == ArtifactHubType && len(parts) == 2:
		return version + ".0", nil
	case hubType == TektonHubType && len(parts) > 2:
		return strings.Join(parts[:2], "."), nil
	default:
		return version, nil
	}
}
// populateDefaultParams flattens params into a map and fills in the type,
// kind and catalog entries from the resolver configuration when absent.
func populateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
	conf := framework.GetResolverConfigFromContext(ctx)
	paramsMap := make(map[string]string, len(params))
	for _, p := range params {
		paramsMap[p.Name] = p.Value.StringVal
	}
	// Fall back to the configured default hub type when the request omits one.
	if _, ok := paramsMap[ParamType]; !ok {
		defaultType, ok := conf[ConfigType]
		if !ok {
			return nil, errors.New("default type was not set during installation of the hub resolver")
		}
		paramsMap[ParamType] = defaultType
	}
	// Same fallback for the resource kind.
	if _, ok := paramsMap[ParamKind]; !ok {
		defaultKind, ok := conf[ConfigKind]
		if !ok {
			return nil, errors.New("default resource kind was not set during installation of the hub resolver")
		}
		paramsMap[ParamKind] = defaultKind
	}
	// The catalog default depends on the (possibly just defaulted) type/kind.
	catalog, err := resolveCatalogName(paramsMap, conf)
	if err != nil {
		return nil, err
	}
	paramsMap[ParamCatalog] = catalog
	return paramsMap, nil
}
func validateParams(ctx context.Context, paramsMap map[string]string, tektonHubURL string) error {
var missingParams []string
if _, ok := paramsMap[ParamName]; !ok {
missingParams = append(missingParams, ParamName)
}
if _, ok := paramsMap[ParamVersion]; !ok {
missingParams = append(missingParams, ParamVersion)
}
if kind, ok := paramsMap[ParamKind]; ok {
if !isSupportedKind(kind) {
return fmt.Errorf("kind param must be one of: %s", strings.Join(supportedKinds, ", "))
}
}
if hubType, ok := paramsMap[ParamType]; ok {
if hubType != ArtifactHubType && hubType != TektonHubType {
return fmt.Errorf("type param must be %s or %s", ArtifactHubType, TektonHubType)
}
if hubType == TektonHubType && tektonHubURL == "" {
return errors.New("please configure TEKTON_HUB_API env variable to use tekton type")
}
}
if len(missingParams) > 0 {
return fmt.Errorf("missing required hub resolver params: %s", strings.Join(missingParams, ", "))
}
return nil
}
// resolveVersionConstraint lists the available versions of the requested
// resource on the configured hub and returns the highest version satisfying
// constraint (skipping Artifact Hub pre-releases). It errors when no version
// matches.
func resolveVersionConstraint(ctx context.Context, paramsMap map[string]string, constraint goversion.Constraints, artifactHubURL, tektonHubURL string) (*goversion.Version, error) {
	var ret *goversion.Version
	switch paramsMap[ParamType] {
	case ArtifactHubType:
		allVersionsURL := fmt.Sprintf("%s/%s", artifactHubURL, fmt.Sprintf(
			ArtifactHubListTasksEndpoint,
			paramsMap[ParamKind], paramsMap[ParamCatalog], paramsMap[ParamName]))
		resp := artifactHubListResult{}
		if err := fetchHubResource(ctx, allVersionsURL, &resp); err != nil {
			return nil, fmt.Errorf("fail to fetch Artifact Hub resource: %w", err)
		}
		for _, vers := range resp.AvailableVersions {
			// Pre-release versions are never selected for a constraint.
			if vers.Prerelease {
				continue
			}
			checkV, err := goversion.NewVersion(vers.Version)
			if err != nil {
				// Fixed arg order: version string first, then the hub type.
				return nil, fmt.Errorf("fail to parse version %s from %s: %w", vers.Version, ArtifactHubType, err)
			}
			ret = highestMatching(ret, checkV, constraint)
		}
	case TektonHubType:
		allVersionsURL := fmt.Sprintf("%s/%s", tektonHubURL,
			fmt.Sprintf(TektonHubListTasksEndpoint,
				paramsMap[ParamCatalog], paramsMap[ParamKind], paramsMap[ParamName]))
		resp := tektonHubListResult{}
		if err := fetchHubResource(ctx, allVersionsURL, &resp); err != nil {
			return nil, fmt.Errorf("fail to fetch Tekton Hub resource: %w", err)
		}
		for _, vers := range resp.Data.Versions {
			checkV, err := goversion.NewVersion(vers.Version)
			if err != nil {
				// Bug fix: the original formatted the whole struct (`vers`)
				// here instead of vers.Version, printing e.g. "{0.1}".
				return nil, fmt.Errorf("fail to parse version %s from %s: %w", vers.Version, TektonHubType, err)
			}
			ret = highestMatching(ret, checkV, constraint)
		}
	}
	if ret == nil {
		return nil, fmt.Errorf("no version found for constraint %s", paramsMap[ParamVersion])
	}
	return ret, nil
}

// highestMatching returns the larger of current and candidate when candidate
// satisfies constraint; otherwise current is returned unchanged. It factors
// out the selection logic that was duplicated across both hub branches.
func highestMatching(current, candidate *goversion.Version, constraint goversion.Constraints) *goversion.Version {
	if !constraint.Check(candidate) {
		return current
	}
	if current != nil && current.GreaterThan(candidate) {
		return current
	}
	return candidate
}
// isSupportedKind reports whether kindValue is one of the resource kinds the
// hub resolver can fetch (see supportedKinds).
func isSupportedKind(kindValue string) bool {
	// The explicit type arguments were redundant; inference resolves them.
	return slices.Contains(supportedKinds, kindValue)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"context"
"encoding/base64"
"errors"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
rrlisters "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
)
// CRDRequester implements the Requester interface using
// ResolutionRequest CRDs.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.CRDRequester] instead.
type CRDRequester struct {
	// clientset creates ResolutionRequest objects in the cluster.
	clientset rrclient.Interface
	// lister reads ResolutionRequest objects from the informer cache.
	lister rrlisters.ResolutionRequestLister
}

// NewCRDRequester returns an implementation of Requester that uses
// ResolutionRequest CRD objects to mediate between the caller who wants a
// resource (e.g. Tekton Pipelines) and the responder who can fetch
// it (e.g. the gitresolver)
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.NewCRDRequester] instead.
func NewCRDRequester(clientset rrclient.Interface, lister rrlisters.ResolutionRequestLister) *CRDRequester {
	return &CRDRequester{clientset, lister}
}

// Compile-time check that CRDRequester satisfies the Requester interface.
var _ Requester = &CRDRequester{}
// Submit constructs a ResolutionRequest object and submits it to the
// kubernetes cluster, returning any errors experienced while doing so.
// If ResolutionRequest is succeeded then it returns the resolved data.
func (r *CRDRequester) Submit(ctx context.Context, resolver ResolverName, req Request) (ResolvedResource, error) {
	// The lister error is ignored on purpose: a cache miss and a genuine
	// "not found" are both handled the same way, by creating the request.
	rr, _ := r.lister.ResolutionRequests(req.Namespace()).Get(req.Name())
	if rr == nil {
		if err := r.createResolutionRequest(ctx, resolver, req); err != nil &&
			// When the request reconciles frequently, the creation may fail
			// because the list informer cache is not updated.
			// If the request already exists then we can assume that is in progress.
			// The next reconcile will handle it based on the actual situation.
			!apierrors.IsAlreadyExists(err) {
			return nil, err
		}
		// Just created (or already existed): resolution is still underway.
		return nil, common.ErrRequestInProgress
	}
	if rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() {
		// TODO(sbwsg): This should be where an existing
		// resource is given an additional owner reference so
		// that it doesn't get deleted until the caller is done
		// with it. Use appendOwnerReference and then submit
		// update to ResolutionRequest.
		return nil, common.ErrRequestInProgress
	}
	if rr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() {
		return CrdIntoResource(rr), nil
	}
	// Succeeded=False: surface the condition message as a resolution failure.
	message := rr.Status.GetCondition(apis.ConditionSucceeded).GetMessage()
	err := common.NewError(common.ReasonResolutionFailed, errors.New(message))
	return nil, err
}
// createResolutionRequest builds a ResolutionRequest for req — owned by the
// requesting object when req exposes an owner — and creates it in the cluster.
func (r *CRDRequester) createResolutionRequest(ctx context.Context, resolver ResolverName, req Request) error {
	// Owner is the zero OwnerReference unless the request carries one.
	var owner metav1.OwnerReference
	if ownedReq, ok := req.(OwnedRequest); ok {
		owner = ownedReq.OwnerRef()
	}
	rr := CreateResolutionRequest(ctx, resolver, req.Name(), req.Namespace(), req.Params(), owner)
	_, err := r.clientset.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Create(ctx, rr, metav1.CreateOptions{})
	return err
}

// CreateResolutionRequest assembles a ResolutionRequest object carrying the
// resolver-type label, the given params, and ownerRef appended as an owner.
func CreateResolutionRequest(ctx context.Context, resolver common.ResolverName, name, namespace string, params []v1.Param, ownerRef metav1.OwnerReference) *v1beta1.ResolutionRequest {
	rr := &v1beta1.ResolutionRequest{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "resolution.tekton.dev/v1beta1",
			Kind:       "ResolutionRequest",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels: map[string]string{
				// This label routes the request to the matching resolver.
				common.LabelKeyResolverType: string(resolver),
			},
		},
		Spec: v1beta1.ResolutionRequestSpec{
			Params: params,
		},
	}
	appendOwnerReference(rr, ownerRef)
	return rr
}
// appendOwnerReference adds ownerRef to rr's owner references unless an
// equal reference is already recorded.
func appendOwnerReference(rr *v1beta1.ResolutionRequest, ownerRef metav1.OwnerReference) {
	for _, existing := range rr.ObjectMeta.OwnerReferences {
		if ownerRefsAreEqual(existing, ownerRef) {
			// Already present; nothing to append.
			return
		}
	}
	rr.ObjectMeta.OwnerReferences = append(rr.ObjectMeta.OwnerReferences, ownerRef)
}
// ownerRefsAreEqual reports whether a and b identify the same owner.
// Controller is a *bool and must be compared by value with explicit nil
// handling: the original condition dereferenced both pointers when neither
// nil-check matched, which panicked when a.Controller and b.Controller were
// both nil.
func ownerRefsAreEqual(a, b metav1.OwnerReference) bool {
	// Exactly one side nil -> not equal.
	if (a.Controller == nil) != (b.Controller == nil) {
		return false
	}
	// Both set -> the pointed-to values must match (both nil counts as equal).
	if a.Controller != nil && *a.Controller != *b.Controller {
		return false
	}
	return a.APIVersion == b.APIVersion && a.Kind == b.Kind && a.Name == b.Name && a.UID == b.UID
}
// ReadOnlyResolutionRequest is an opaque wrapper around ResolutionRequest
// that provides the methods needed to read data from it using the
// Resource interface without exposing the underlying API
// object.
type ReadOnlyResolutionRequest struct {
	// req is the wrapped ResolutionRequest; only its status is read.
	req *v1beta1.ResolutionRequest
}

// Compile-time check that the wrapper satisfies common.ResolvedResource.
var _ common.ResolvedResource = ReadOnlyResolutionRequest{}

// CrdIntoResource wraps a ResolutionRequest in a read-only accessor.
func CrdIntoResource(rr *v1beta1.ResolutionRequest) ReadOnlyResolutionRequest {
	return ReadOnlyResolutionRequest{req: rr}
}
// Annotations returns a copy of the status annotations from the underlying
// ResolutionRequest, or nil when the status carries none.
func (r ReadOnlyResolutionRequest) Annotations() map[string]string {
	status := r.req.GetStatus()
	if status == nil || status.Annotations == nil {
		return nil
	}
	// Copy so callers cannot mutate the request's status through the map.
	out := make(map[string]string, len(status.Annotations))
	for key, val := range status.Annotations {
		out[key] = val
	}
	return out
}
// Data returns the resolved content, which is stored base64-encoded in the
// ResolutionRequest status; strict decoding rejects malformed padding.
func (r ReadOnlyResolutionRequest) Data() ([]byte, error) {
	encodedData := r.req.Status.ResolutionRequestStatusFields.Data
	decodedBytes, err := base64.StdEncoding.Strict().DecodeString(encodedData)
	if err != nil {
		return nil, fmt.Errorf("error decoding data from base64: %w", err)
	}
	return decodedBytes, nil
}

// RefSource returns the source reference recorded in the request status.
func (r ReadOnlyResolutionRequest) RefSource() *v1.RefSource {
	return r.req.Status.RefSource
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"fmt"
"hash"
"hash/fnv"
"sort"
"strings"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/kmeta"
)
const (
	// ParamName is a param that explicitly assigns a name to the remote object
	ParamName = "name"
	// ParamURL is a param that holds the URL used for accessing the remote object
	ParamURL = "url"
)

// maxLength bounds generated request names to a valid DNS-1123 label length.
const maxLength = validation.DNS1123LabelMaxLength
// GenerateDeterministicName makes a best-effort attempt to create a
// unique but reproducible name for use in a Request. The returned value
// will have the format {prefix}-{hash} where {prefix} is
// given and {hash} is nameHasher(base) + nameHasher(param1) +
// nameHasher(param2) + ...
func GenerateDeterministicName(prefix, base string, params v1.Params) (string, error) {
	return GenerateDeterministicNameFromSpec(prefix, base, &v1beta1.ResolutionRequestSpec{Params: params})
}

// GetNameAndNamespace determines the name and namespace for a resource request.
// It prioritizes explicit values, falling back to the owning object and "default" namespace.
// If needed, it generates a deterministic name to prevent duplicate requests within a context.
func GetNameAndNamespace(resolverName string, owner kmeta.OwnerRefable, name string, namespace string, req *v1beta1.ResolutionRequestSpec) (string, string, error) {
	if name == "" {
		name = owner.GetObjectMeta().GetName()
		namespace = owner.GetObjectMeta().GetNamespace()
	}
	if namespace == "" {
		namespace = "default"
	}
	// Generating a deterministic name for the resource request
	// prevents multiple requests being issued for the same
	// pipelinerun's pipelineRef or taskrun's taskRef.
	remoteResourceBaseName := namespace + "/" + name
	// NOTE(review): the message below says "taskrun" even though the owner is
	// generic — confirm whether the wording should mention the owner kind.
	name, err := GenerateDeterministicNameFromSpec(resolverName, remoteResourceBaseName, req)
	if err != nil {
		return "", "", fmt.Errorf("error generating name for taskrun %s/%s: %w", namespace, name, err)
	}
	return name, namespace, nil
}

// nameHasher returns the hash.Hash to use when generating names.
// FNV-128a is deterministic across processes, which keeps generated
// request names reproducible.
func nameHasher() hash.Hash {
	return fnv.New128a()
}
// GenerateDeterministicNameFromSpec makes a best-effort attempt to create a
// unique but reproducible name for use in a Request. The returned value
// will have the format {prefix}-{hash} where {prefix} is
// given and {hash} is nameHasher(base) + nameHasher(param1) +
// nameHasher(param2) + ...
func GenerateDeterministicNameFromSpec(prefix, base string, resolutionSpec *v1beta1.ResolutionRequestSpec) (string, error) {
	hasher := nameHasher()
	if _, err := hasher.Write([]byte(base)); err != nil {
		return "", err
	}
	if resolutionSpec == nil {
		return fmt.Sprintf("%s-%x", prefix, hasher.Sum(nil)), nil
	}
	// Hash params sorted by name so the result does not depend on the order
	// the caller supplied them in; DeepCopy avoids mutating the caller's slice.
	params := resolutionSpec.Params
	sortedParams := make(v1.Params, len(params))
	for i := range params {
		sortedParams[i] = *params[i].DeepCopy()
	}
	sort.SliceStable(sortedParams, func(i, j int) bool {
		return sortedParams[i].Name < sortedParams[j].Name
	})
	for _, p := range sortedParams {
		if _, err := hasher.Write([]byte(p.Name)); err != nil {
			return "", err
		}
		switch p.Value.Type {
		case v1.ParamTypeString:
			if _, err := hasher.Write([]byte(p.Value.StringVal)); err != nil {
				return "", err
			}
		case v1.ParamTypeArray, v1.ParamTypeObject:
			// Arrays and objects are hashed via their JSON encoding.
			asJSON, err := p.Value.MarshalJSON()
			if err != nil {
				return "", err
			}
			if _, err := hasher.Write(asJSON); err != nil {
				return "", err
			}
		}
	}
	if len(resolutionSpec.URL) > 0 {
		if _, err := hasher.Write([]byte(resolutionSpec.URL)); err != nil {
			return "", err
		}
	}
	name := fmt.Sprintf("%s-%x", prefix, hasher.Sum(nil))
	if maxLength > len(name) {
		return name, nil
	}
	// Truncate to a valid label length. The original unconditionally cut at
	// the last space, but generated names ("prefix-hex") normally contain no
	// space, so strings.LastIndex returned -1 and name[:-1] panicked with a
	// slice bounds error whenever truncation was needed.
	if idx := strings.LastIndex(name[:maxLength], " "); idx >= 0 {
		return name[:idx], nil
	}
	return name[:maxLength], nil
}
// GenerateErrorLogString makes a best effort attempt to get the name of the task
// when a resolver error occurred. The TaskRef name does not have to be set, where
// the specific resolver gets the name from the parameters.
func GenerateErrorLogString(resolverType string, params v1.Params) string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "resolver type %s\n", resolverType)
	for _, p := range params {
		switch p.Name {
		case ParamName:
			name := p.Value.StringVal
			// Non-string values are rendered via their JSON encoding.
			if p.Value.Type != v1.ParamTypeString {
				asJSON, err := p.Value.MarshalJSON()
				if err != nil {
					fmt.Fprintf(&sb, "name could not be marshalled: %s\n", err.Error())
					continue
				}
				name = string(asJSON)
			}
			fmt.Fprintf(&sb, "name = %s\n", name)
		case ParamURL:
			fmt.Fprintf(&sb, "url = %s\n", p.Value.StringVal)
		}
	}
	return sb.String()
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
// Compile-time check that BasicRequest satisfies the Request interface.
var _ Request = &BasicRequest{}

// BasicRequest holds the fields needed to submit a new resource request.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.BasicRequest] instead.
type BasicRequest struct {
	// name identifies the request.
	name string
	// namespace is the namespace the request is associated with.
	namespace string
	// params are passed through to the resolver.
	params v1.Params
}

// NewRequest returns an instance of a BasicRequest with the given name,
// namespace and params.
//
// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.NewRequest] instead.
func NewRequest(name, namespace string, params v1.Params) Request {
	return &BasicRequest{name, namespace, params}
}

// Name returns the name attached to the request
func (req *BasicRequest) Name() string {
	return req.name
}

// Namespace returns the namespace that the request is associated with
func (req *BasicRequest) Namespace() string {
	return req.namespace
}

// Params are the map of parameters associated with this request
func (req *BasicRequest) Params() v1.Params {
	return req.params
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package result
import (
"encoding/json"
"errors"
"fmt"
)
const (
	// TaskRunResultType default task run result value
	TaskRunResultType ResultType = 1
	// reserved: 2
	// was RunResultType

	// InternalTektonResultType default internal tekton result value
	InternalTektonResultType = 3
	// UnknownResultType default unknown result type value
	UnknownResultType = 10
	// StepResultType default step result value
	StepResultType ResultType = 4
	// StepArtifactsResultType default step artifacts result value
	StepArtifactsResultType ResultType = 5
	// TaskRunArtifactsResultType default taskRun artifacts result value
	TaskRunArtifactsResultType ResultType = 6
	// NOTE(review): InternalTektonResultType and UnknownResultType are
	// untyped constants while the others are explicitly ResultType — likely
	// an oversight, but adding the type could break callers that assign them
	// to plain ints; confirm before changing.
)

// RunResult is used to write key/value pairs to TaskRun pod termination messages.
// The key/value pairs may come from the entrypoint binary, or represent a TaskRunResult.
// If they represent a TaskRunResult, the key is the name of the result and the value is the
// JSON-serialized value of the result.
type RunResult struct {
	Key   string `json:"key"`
	Value string `json:"value"`
	// ResourceName may be used in tests, but it is not populated in termination messages.
	// It is preserved here for backwards compatibility and will not be ported to v1.
	ResourceName string     `json:"resourceName,omitempty"`
	ResultType   ResultType `json:"type,omitempty"`
}

// ResultType used to find out whether a RunResult is from a task result or not
// Note that ResultsType is another type which is used to define the data type
// (e.g. string, array, etc) we used for Results
//
//nolint:revive // revive complains about stutter of `result.ResultType`.
type ResultType int
// UnmarshalJSON accepts either an int or a legacy string encoding for a
// ResultType. Ints are the current wire format — string names were dropped
// because they made termination messages bigger — but the string form is
// still accepted so a controller updated mid-TaskRun can interpret values
// emitted by an older entrypoint.
func (r *ResultType) UnmarshalJSON(data []byte) error {
	// Try the compact int form first.
	var asInt int
	intErr := json.Unmarshal(data, &asInt)
	if intErr == nil {
		*r = ResultType(asInt)
		return nil
	}
	// Fall back to the legacy string form.
	var asString string
	if strErr := json.Unmarshal(data, &asString); strErr != nil {
		return fmt.Errorf("unsupported value type, neither int nor string: %w", errors.Join(intErr, strErr))
	}
	switch asString {
	case "StepResult":
		*r = StepResultType
	case "TaskRunResult":
		*r = TaskRunResultType
	case "InternalTektonResult":
		*r = InternalTektonResultType
	case "StepArtifactsResult":
		*r = StepArtifactsResultType
	case "TaskRunArtifactsResult":
		*r = TaskRunArtifactsResultType
	default:
		*r = UnknownResultType
	}
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"errors"
"fmt"
"sort"
"strings"
)
// SpireConfig holds the settings used to connect Tekton Pipelines to a
// SPIRE deployment.
// +k8s:deepcopy-gen=true
type SpireConfig struct {
	// TrustDomain corresponds to the trust root of a SPIFFE identity provider.
	TrustDomain string
	// SocketPath is the path to the spire agent socket defined by the CSI driver.
	SocketPath string
	// ServerAddr is the spire server address.
	ServerAddr string
	// NodeAliasPrefix is the prefix attached to the node name when registering
	// it with the spire server; Validate requires it to start with "/".
	NodeAliasPrefix string
	// MockSpire is only to be used for testing the controller; it will not
	// exhibit all characteristics of spire since it is only used in the
	// context of process memory.
	MockSpire bool
}

// Validate returns an error when any required field is empty or the node
// alias prefix does not start with "/".
func (c SpireConfig) Validate() error {
	required := map[string]string{
		"spire-trust-domain":      c.TrustDomain,
		"spire-socket-path":       c.SocketPath,
		"spire-server-addr":       c.ServerAddr,
		"spire-node-alias-prefix": c.NodeAliasPrefix,
	}
	var unset []string
	for flagName, value := range required {
		if value == "" {
			unset = append(unset, flagName)
		}
	}
	if len(unset) > 0 {
		// Sorted so the error message is deterministic despite map iteration.
		sort.Strings(unset)
		return fmt.Errorf("found unset spire configuration flags: %s", unset)
	}
	if !strings.HasPrefix(c.NodeAliasPrefix, "/") {
		return errors.New("Spire node alias should start with a /")
	}
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpireConfig) DeepCopyInto(out *SpireConfig) {
	// A plain struct assignment is a full deep copy here: SpireConfig contains
	// only value fields (strings and a bool), no pointers, slices or maps.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpireConfig.
// Returns nil when the receiver is nil.
func (in *SpireConfig) DeepCopy() *SpireConfig {
	if in == nil {
		return nil
	}
	out := new(SpireConfig)
	in.DeepCopyInto(out)
	return out
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spire
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig"
"github.com/spiffe/go-spiffe/v2/svid/x509svid"
"github.com/spiffe/go-spiffe/v2/workloadapi"
entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1"
spiffetypes "github.com/spiffe/spire-api-sdk/proto/spire/api/types"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
spireconfig "github.com/tektoncd/pipeline/pkg/spire/config"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
func init() {
	// Register the SPIRE controller API client with knative's default
	// injection framework so it is attached to the context at startup.
	injection.Default.RegisterClient(withControllerClient)
}
// controllerKey is the unexported, zero-size context key under which the
// ControllerAPIClient is stored in a context.Context.
type controllerKey struct{}
// OnStore returns a config-store callback that, when the SPIRE config map
// changes, closes the controller API client's existing connections and stores
// the new configuration so subsequent calls re-dial with it.
func OnStore(ctx context.Context, logger *zap.SugaredLogger) func(name string, value interface{}) {
	return func(name string, value interface{}) {
		if name != config.GetSpireConfigName() {
			return
		}
		cfg, ok := value.(*spireconfig.SpireConfig)
		if !ok {
			logger.Error("Failed to do type assertion for extracting SPIRE config")
			return
		}
		controllerAPIClient := GetControllerAPIClient(ctx)
		if controllerAPIClient == nil {
			// BUG FIX: GetControllerAPIClient returns nil (and logs) when no
			// client was injected; calling Close on it would panic.
			return
		}
		// BUG FIX: the Close error was silently dropped; log it so a failed
		// teardown of the old connections is visible.
		if err := controllerAPIClient.Close(); err != nil {
			logger.Errorf("Failed to close existing SPIRE client connections: %v", err)
		}
		controllerAPIClient.SetConfig(*cfg)
	}
}
// GetControllerAPIClient extracts the ControllerAPIClient from the context.
// When no client was injected it logs an error and returns nil.
func GetControllerAPIClient(ctx context.Context) ControllerAPIClient {
	v := ctx.Value(controllerKey{})
	if v == nil {
		logging.FromContext(ctx).Errorf("Unable to fetch client from context.")
		return nil
	}
	return v.(ControllerAPIClient)
}
// withControllerClient attaches an empty spireControllerAPIClient to the
// context; its configuration is supplied later via SetConfig (see OnStore).
func withControllerClient(ctx context.Context, cfg *rest.Config) context.Context {
	return context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{})
}
// spireControllerAPIClient is the concrete ControllerAPIClient backed by the
// SPIFFE workload API and an mTLS connection to the SPIRE server.
type spireControllerAPIClient struct {
	config       *spireconfig.SpireConfig // set via SetConfig; nil until configured
	serverConn   *grpc.ClientConn         // mTLS connection to the SPIRE server
	workloadConn *workloadapi.X509Source  // X.509 source from the workload API socket
	entryClient  entryv1.EntryClient      // entry registration client on serverConn
	workloadAPI  *workloadapi.Client      // workload API client used for SVID fetches
}

// Compile-time check that spireControllerAPIClient satisfies ControllerAPIClient.
var _ ControllerAPIClient = (*spireControllerAPIClient)(nil)
// setupClient lazily establishes the SPIRE connections. It fails when
// SetConfig has not been called yet, is a no-op when every connection is
// already in place, and otherwise dials whatever is missing.
func (sc *spireControllerAPIClient) setupClient(ctx context.Context) error {
	if sc.config == nil {
		return errors.New("config has not been set yet")
	}
	ready := sc.entryClient != nil && sc.workloadConn != nil && sc.workloadAPI != nil && sc.serverConn != nil
	if ready {
		return nil
	}
	return sc.dial(ctx)
}
// dial establishes, in order: the X.509 source, the workload API client, the
// mTLS gRPC connection to the SPIRE server, and the entry client on top of
// that connection. Each step is skipped when the corresponding field is
// already set, so a partially failed dial can be retried and resumes where it
// left off.
func (sc *spireControllerAPIClient) dial(ctx context.Context) error {
	if sc.workloadConn == nil {
		// Create X509Source - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go
		source, err := workloadapi.NewX509Source(ctx, workloadapi.WithClientOptions(workloadapi.WithAddr(sc.config.SocketPath)))
		if err != nil {
			return fmt.Errorf("unable to create X509Source for SPIFFE client: %w", err)
		}
		sc.workloadConn = source
	}
	if sc.workloadAPI == nil {
		// spire workloadapi client for controller - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go
		client, err := workloadapi.New(ctx, workloadapi.WithAddr(sc.config.SocketPath))
		if err != nil {
			return fmt.Errorf("spire workload API not initialized due to error: %w", err)
		}
		sc.workloadAPI = client
	}
	if sc.serverConn == nil {
		// Create connection to spire server, with both sides authenticated via
		// SVIDs drawn from the X509Source (mTLS).
		tlsConfig := tlsconfig.MTLSClientConfig(sc.workloadConn, sc.workloadConn, tlsconfig.AuthorizeAny())
		conn, err := grpc.DialContext(ctx, sc.config.ServerAddr, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
		if err != nil {
			// NOTE(review): on server-dial failure only the X509Source is closed
			// and reset (recreated on retry) while workloadAPI is kept alive —
			// presumably intentional since it does not depend on the server
			// connection; confirm before changing.
			sc.workloadConn.Close()
			sc.workloadConn = nil
			return fmt.Errorf("unable to dial SPIRE server: %w", err)
		}
		sc.serverConn = conn
	}
	if sc.entryClient == nil {
		sc.entryClient = entryv1.NewEntryClient(sc.serverConn)
	}
	return nil
}
// SetConfig sets the spire configuration for ControllerAPIClient.
// The argument is a value, so the stored config is a private copy; callers
// cannot mutate it afterwards.
func (sc *spireControllerAPIClient) SetConfig(c spireconfig.SpireConfig) {
	sc.config = &c
}
// fetchControllerSVID retrieves the controller's own X.509 SVID from the
// workload API.
func (sc *spireControllerAPIClient) fetchControllerSVID(ctx context.Context) (*x509svid.SVID, error) {
	svid, err := sc.workloadAPI.FetchX509SVID(ctx)
	if err == nil {
		return svid, nil
	}
	return nil, fmt.Errorf("failed to fetch controller SVID: %w", err)
}
// nodeEntry builds the SPIRE registration entry that aliases a Kubernetes
// node under the configured alias prefix, matched via k8s_psat selectors and
// parented to the SPIRE server identity.
func (sc *spireControllerAPIClient) nodeEntry(nodeName string) *spiffetypes.Entry {
	return &spiffetypes.Entry{
		SpiffeId: &spiffetypes.SPIFFEID{
			TrustDomain: sc.config.TrustDomain,
			Path:        fmt.Sprintf("%v%v", sc.config.NodeAliasPrefix, nodeName),
		},
		ParentId: &spiffetypes.SPIFFEID{
			TrustDomain: sc.config.TrustDomain,
			Path:        "/spire/server",
		},
		Selectors: []*spiffetypes.Selector{
			{Type: "k8s_psat", Value: "agent_ns:spire"},
			{Type: "k8s_psat", Value: "agent_node_name:" + nodeName},
		},
	}
}
// workloadEntry builds the SPIRE registration entry for a TaskRun pod,
// parented to the pod's node alias and selected by pod UID and pod name.
func (sc *spireControllerAPIClient) workloadEntry(tr *v1beta1.TaskRun, pod *corev1.Pod, expiry int64) *spiffetypes.Entry {
	// Note: We can potentially add attestation on the container images as well
	// since the information is available here.
	return &spiffetypes.Entry{
		SpiffeId: &spiffetypes.SPIFFEID{
			TrustDomain: sc.config.TrustDomain,
			Path:        fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name),
		},
		ParentId: &spiffetypes.SPIFFEID{
			TrustDomain: sc.config.TrustDomain,
			Path:        fmt.Sprintf("%v%v", sc.config.NodeAliasPrefix, pod.Spec.NodeName),
		},
		Selectors: []*spiffetypes.Selector{
			{Type: "k8s", Value: "pod-uid:" + string(pod.UID)},
			{Type: "k8s", Value: "pod-name:" + pod.Name},
		},
		ExpiresAt: expiry,
	}
}
// CreateEntries registers two SPIRE entries for the TaskRun: a node alias for
// the pod's node and a workload entry for the TaskRun pod itself. ttl bounds
// the lifetime of the SPIRE entry, not the SVID TTL. Results with code
// AlreadyExists are treated as success.
func (sc *spireControllerAPIClient) CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error {
	if err := sc.setupClient(ctx); err != nil {
		return err
	}
	// BUG FIX: the expiry was computed as time.Now().Unix() + int64(ttl),
	// which adds the duration in *nanoseconds* to a seconds-based Unix
	// timestamp, producing a far-future expiry. Add the duration to the
	// current time and take Unix seconds of the result instead.
	expiryTime := time.Now().Add(ttl).Unix()
	entries := []*spiffetypes.Entry{
		sc.nodeEntry(pod.Spec.NodeName),
		sc.workloadEntry(tr, pod, expiryTime),
	}
	req := entryv1.BatchCreateEntryRequest{
		Entries: entries,
	}
	resp, err := sc.entryClient.BatchCreateEntry(ctx, &req)
	if err != nil {
		return err
	}
	if len(resp.GetResults()) != len(entries) {
		return fmt.Errorf("batch create entry failed, malformed response expected %v result", len(entries))
	}
	// Collect the paths and codes of every entry that was neither created nor
	// already present.
	var errPaths []string
	var errCodes []int32
	for _, r := range resp.GetResults() {
		statusCode := r.GetStatus().GetCode()
		if statusCode < 0 {
			return fmt.Errorf("statusCode overflows uint32: %d", statusCode)
		}
		code := codes.Code(statusCode)
		if code != codes.AlreadyExists && code != codes.OK {
			errPaths = append(errPaths, r.GetEntry().GetSpiffeId().GetPath())
			errCodes = append(errCodes, statusCode)
		}
	}
	if len(errPaths) != 0 {
		return fmt.Errorf("batch create entry failed for entries %+v with codes %+v", errPaths, errCodes)
	}
	return nil
}
// getEntries lists every SPIRE entry registered under the TaskRun's SPIFFE ID,
// following pagination until the server reports no next page.
func (sc *spireControllerAPIClient) getEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) ([]*spiffetypes.Entry, error) {
	req := &entryv1.ListEntriesRequest{
		Filter: &entryv1.ListEntriesRequest_Filter{
			BySpiffeId: &spiffetypes.SPIFFEID{
				TrustDomain: sc.config.TrustDomain,
				Path:        fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name),
			},
		},
	}
	entries := []*spiffetypes.Entry{}
	for {
		resp, err := sc.entryClient.ListEntries(ctx, req)
		if err != nil {
			return nil, err
		}
		entries = append(entries, resp.GetEntries()...)
		token := resp.GetNextPageToken()
		if token == "" {
			return entries, nil
		}
		req.PageToken = token
	}
}
// DeleteEntry removes all SPIRE entries registered for the given TaskRun.
// Entries that are already gone (NotFound) are tolerated.
func (sc *spireControllerAPIClient) DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error {
	entries, err := sc.getEntries(ctx, tr, pod)
	if err != nil {
		return err
	}
	var ids []string
	for _, e := range entries {
		ids = append(ids, e.GetId())
	}
	resp, err := sc.entryClient.BatchDeleteEntry(ctx, &entryv1.BatchDeleteEntryRequest{Ids: ids})
	if err != nil {
		return err
	}
	var errIds []string
	var errCodes []int32
	for _, res := range resp.GetResults() {
		statusCode := res.GetStatus().GetCode()
		if statusCode < 0 {
			return fmt.Errorf("statusCode overflows uint32: %d", statusCode)
		}
		switch codes.Code(statusCode) {
		case codes.OK, codes.NotFound:
			// Deleted, or already absent — both count as success.
		default:
			errIds = append(errIds, res.GetId())
			errCodes = append(errCodes, statusCode)
		}
	}
	if len(errIds) != 0 {
		return fmt.Errorf("batch delete entry failed for ids %+v with codes %+v", errIds, errCodes)
	}
	return nil
}
// Close tears down the server connection, the workload API client and the
// X.509 source in that order, returning at the first failure. Fields are
// nilled out as they close, so a later call resumes with whatever is left.
func (sc *spireControllerAPIClient) Close() error {
	if conn := sc.serverConn; conn != nil {
		if err := conn.Close(); err != nil {
			return err
		}
		sc.serverConn = nil
	}
	if api := sc.workloadAPI; api != nil {
		if err := api.Close(); err != nil {
			return err
		}
		sc.workloadAPI = nil
	}
	if src := sc.workloadConn; src != nil {
		if err := src.Close(); err != nil {
			return err
		}
		sc.workloadConn = nil
	}
	sc.entryClient = nil
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spire
import (
"context"
"time"
"github.com/pkg/errors"
"github.com/spiffe/go-spiffe/v2/svid/x509svid"
"github.com/spiffe/go-spiffe/v2/workloadapi"
spireconfig "github.com/tektoncd/pipeline/pkg/spire/config"
)
// NewEntrypointerAPIClient creates the EntrypointerAPIClient.
// The workload API connection is established lazily on first use.
func NewEntrypointerAPIClient(c *spireconfig.SpireConfig) EntrypointerAPIClient {
	return &spireEntrypointerAPIClient{
		config: c,
	}
}
// spireEntrypointerAPIClient fetches workload SVIDs from the SPIFFE workload
// API on behalf of the entrypointer.
type spireEntrypointerAPIClient struct {
	config *spireconfig.SpireConfig
	client *workloadapi.Client // lazily created by setupClient/dial
}
// setupClient lazily connects to the workload API: it fails when no config
// has been set, is a no-op when already connected, and dials otherwise.
func (w *spireEntrypointerAPIClient) setupClient(ctx context.Context) error {
	switch {
	case w.config == nil:
		return errors.New("config has not been set yet")
	case w.client != nil:
		return nil
	}
	return w.dial(ctx)
}
// dial creates the workload API client bound to the configured agent socket.
// spire workloadapi client for entrypoint - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go
func (w *spireEntrypointerAPIClient) dial(ctx context.Context) error {
	c, err := workloadapi.New(ctx, workloadapi.WithAddr(w.config.SocketPath))
	if err != nil {
		return errors.Wrap(err, "spire workload API not initialized due to error")
	}
	w.client = c
	return nil
}
// package-level timeout and backoff enable shortened timeout for unit tests
var (
	// timeout caps the total time getWorkloadSVID retries fetching an SVID.
	timeout = 20 * time.Second
	// backoff is the fixed interval between fetch attempts.
	backoff = 2 * time.Second
)
// getWorkloadSVID fetches the workload's X.509 SVID, retrying at a fixed
// interval (backoff) until a fetch succeeds or the overall timeout elapses.
// The first attempt is immediate; subsequent ones wait for the ticker.
// NOTE(review): the retry uses a constant interval, not exponential backoff —
// presumably adequate for the workload API; confirm before changing.
func (w *spireEntrypointerAPIClient) getWorkloadSVID(ctx context.Context) (*x509svid.SVID, error) {
	ticker := time.NewTicker(backoff)
	defer ticker.Stop()
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	for {
		var xsvid *x509svid.SVID
		var err error
		if xsvid, err = w.client.FetchX509SVID(ctx); err == nil { // No err -- return immediately on success
			return xsvid, nil
		}
		select {
		case <-ticker.C:
			// do nothing; loop will try again
		case <-ctx.Done():
			// ctx timed out or was cancelled; surface both the ctx error and
			// the last fetch error.
			return nil, errors.Wrap(ctx.Err(), errors.Wrap(err, "failed to fetch SVID").Error())
		}
	}
}
// Close shuts down the workload API client, if one was ever created.
func (w *spireEntrypointerAPIClient) Close() error {
	if w.client == nil {
		return nil
	}
	return w.client.Close()
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spire
import (
"context"
"crypto"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/pem"
"errors"
"strings"
"github.com/spiffe/go-spiffe/v2/svid/x509svid"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/result"
)
// Sign signs the TaskRun results with the TaskRun spire SVID and appends the
// results to RunResult. Output order is part of the verification contract:
// the PEM-encoded SVID certificate first, then one signature entry per
// TaskRun-typed result, then the manifest of signed keys, then the manifest's
// own signature.
func (w *spireEntrypointerAPIClient) Sign(ctx context.Context, results []result.RunResult) ([]result.RunResult, error) {
	err := w.setupClient(ctx)
	if err != nil {
		return nil, err
	}
	// Fetch this workload's SVID (retries internally until timeout).
	xsvid, err := w.getWorkloadSVID(ctx)
	if err != nil {
		return nil, err
	}
	if len(xsvid.Certificates) == 0 {
		return nil, errors.New("returned workload svid does not have certificates")
	}
	output := []result.RunResult{}
	// Publish the leaf certificate so verifiers can check the signatures below.
	p := pem.EncodeToMemory(&pem.Block{
		Bytes: xsvid.Certificates[0].Raw,
		Type:  "CERTIFICATE",
	})
	output = append(output, result.RunResult{
		Key:        KeySVID,
		Value:      string(p),
		ResultType: result.TaskRunResultType,
	})
	// Sign each TaskRun-typed result individually.
	for _, r := range results {
		if r.ResultType == result.TaskRunResultType {
			resultValue, err := getResultValue(r)
			if err != nil {
				return nil, err
			}
			s, err := signWithKey(xsvid, resultValue)
			if err != nil {
				return nil, err
			}
			output = append(output, result.RunResult{
				Key:        r.Key + KeySignatureSuffix,
				Value:      base64.StdEncoding.EncodeToString(s),
				ResultType: result.TaskRunResultType,
			})
		}
	}
	// get complete manifest of keys such that it can be verified
	manifest := getManifest(results)
	output = append(output, result.RunResult{
		Key:        KeyResultManifest,
		Value:      manifest,
		ResultType: result.TaskRunResultType,
	})
	manifestSig, err := signWithKey(xsvid, manifest)
	if err != nil {
		return nil, err
	}
	output = append(output, result.RunResult{
		Key:        KeyResultManifest + KeySignatureSuffix,
		Value:      base64.StdEncoding.EncodeToString(manifestSig),
		ResultType: result.TaskRunResultType,
	})
	return output, nil
}
// signWithKey signs the SHA-256 digest of value with the SVID's private key.
func signWithKey(xsvid *x509svid.SVID, value string) ([]byte, error) {
	digest := sha256.Sum256([]byte(value))
	return xsvid.PrivateKey.Sign(rand.Reader, digest[:], crypto.SHA256)
}
// getManifest returns a comma-separated list of the TaskRun-typed result
// keys, excluding the SVID entry and any signature entries.
func getManifest(results []result.RunResult) string {
	var keys []string
	for _, r := range results {
		if r.ResultType != result.TaskRunResultType || r.Key == KeySVID || strings.HasSuffix(r.Key, KeySignatureSuffix) {
			continue
		}
		keys = append(keys, r.Key)
	}
	return strings.Join(keys, ",")
}
// AppendStatusInternalAnnotation creates the status annotations which are used by the controller to verify the status hash
func (sc *spireControllerAPIClient) AppendStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun) error {
	if err := sc.setupClient(ctx); err != nil {
		return err
	}
	// Hash the current status.
	statusHash, err := hashTaskrunStatusInternal(tr)
	if err != nil {
		return err
	}
	// Sign the hash with the controller's private key.
	xsvid, err := sc.fetchControllerSVID(ctx)
	if err != nil {
		return err
	}
	signature, err := signWithKey(xsvid, statusHash)
	if err != nil {
		return err
	}
	if len(xsvid.Certificates) == 0 {
		return errors.New("returned controller svid does not have certificates")
	}
	// PEM-encode the controller certificate so verifiers can check the
	// signature against it.
	pemCert := pem.EncodeToMemory(&pem.Block{
		Bytes: xsvid.Certificates[0].Raw,
		Type:  "CERTIFICATE",
	})
	if tr.Status.Annotations == nil {
		tr.Status.Annotations = map[string]string{}
	}
	tr.Status.Annotations[controllerSvidAnnotation] = string(pemCert)
	tr.Status.Annotations[TaskRunStatusHashAnnotation] = statusHash
	tr.Status.Annotations[taskRunStatusHashSigAnnotation] = base64.StdEncoding.EncodeToString(signature)
	return nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spire
import (
"context"
"crypto/sha256"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/result"
spireconfig "github.com/tektoncd/pipeline/pkg/spire/config"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
)
func init() {
	// Register the unconfigured controller client with knative's fake
	// injection framework (used by tests).
	injection.Fake.RegisterClient(withFakeControllerClient)
}
// withFakeControllerClient attaches an unconfigured spireControllerAPIClient
// to the context for the fake (test) injection path.
func withFakeControllerClient(ctx context.Context, cfg *rest.Config) context.Context {
	return context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{})
}
// MockClient is a client used for mocking this package for unit testing
// other tekton components that use the spire entrypointer or controller client.
//
// The MockClient implements both SpireControllerApiClient and SpireEntrypointerApiClient
// and in addition to that provides the helper functions to define and query internal state.
type MockClient struct {
	// Entries is a dictionary of entries that mock the SPIRE server datastore (for function Sign only)
	Entries map[string]bool
	// SignIdentities represents the list of identities to use to sign (providing context of a caller to Sign)
	// when Sign is called, the identity is dequeued from the slice. A signature will only be provided if the
	// corresponding entry is in Entries. This only takes effect if SignOverride is nil.
	SignIdentities []string
	// VerifyAlwaysReturns defines whether to always verify successfully or to always fail verification if non-nil.
	// This only takes effect on Verify functions:
	// - VerifyStatusInternalAnnotationOverride
	// - VerifyTaskRunResultsOverride
	VerifyAlwaysReturns *bool
	// VerifyStatusInternalAnnotationOverride contains the function to overwrite a call to VerifyStatusInternalAnnotation
	VerifyStatusInternalAnnotationOverride func(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error
	// VerifyTaskRunResultsOverride contains the function to overwrite a call to VerifyTaskRunResults
	VerifyTaskRunResultsOverride func(ctx context.Context, prs []result.RunResult, tr *v1beta1.TaskRun) error
	// AppendStatusInternalAnnotationOverride contains the function to overwrite a call to AppendStatusInternalAnnotation
	AppendStatusInternalAnnotationOverride func(ctx context.Context, tr *v1beta1.TaskRun) error
	// CheckSpireVerifiedFlagOverride contains the function to overwrite a call to CheckSpireVerifiedFlag
	CheckSpireVerifiedFlagOverride func(tr *v1beta1.TaskRun) bool
	// SignOverride contains the function to overwrite a call to Sign
	SignOverride func(ctx context.Context, results []result.RunResult) ([]result.RunResult, error)
}
// Compile-time checks that MockClient satisfies both client interfaces.
var _ ControllerAPIClient = (*MockClient)(nil)
var _ EntrypointerAPIClient = (*MockClient)(nil)

// controllerSvid is the placeholder SVID value the mock stores in status annotations.
const controllerSvid = "CONTROLLER_SVID_DATA"
// mockSign produces a deterministic fake signature: the signer name plus the
// hex SHA-256 of the content.
func (*MockClient) mockSign(content, signedBy string) string {
	digest := sha256.Sum256([]byte(content))
	return fmt.Sprintf("signed-by-%s:%x", signedBy, digest)
}
// mockVerify reports whether sig is exactly the mock signature of content by signedBy.
func (sc *MockClient) mockVerify(content, sig, signedBy string) bool {
	expected := sc.mockSign(content, signedBy)
	return expected == sig
}
// GetIdentity returns the identity string ("/ns/<ns>/taskrun/<name>") used
// for signing and verifying in the mocked spire client.
func (*MockClient) GetIdentity(tr *v1beta1.TaskRun) string {
	identity := fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name)
	return identity
}
// AppendStatusInternalAnnotation creates the status annotations which are used by the controller to verify the status hash
func (sc *MockClient) AppendStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun) error {
	if override := sc.AppendStatusInternalAnnotationOverride; override != nil {
		return override(ctx, tr)
	}
	// Hash the current status, then record the mock SVID, the hash and its
	// mock signature as status annotations.
	currentHash, err := hashTaskrunStatusInternal(tr)
	if err != nil {
		return err
	}
	if tr.Status.Annotations == nil {
		tr.Status.Annotations = map[string]string{}
	}
	tr.Status.Annotations[controllerSvidAnnotation] = controllerSvid
	tr.Status.Annotations[TaskRunStatusHashAnnotation] = currentHash
	tr.Status.Annotations[taskRunStatusHashSigAnnotation] = sc.mockSign(currentHash, "controller")
	return nil
}
// CheckSpireVerifiedFlag checks if the verified status annotation is set which would result in spire verification failed
func (sc *MockClient) CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool {
	if override := sc.CheckSpireVerifiedFlagOverride; override != nil {
		return override(tr)
	}
	_, flagged := tr.Status.Annotations[VerifiedAnnotation]
	return !flagged
}
// CreateEntries adds entries to the dictionary of entries that mock the SPIRE server datastore
func (sc *MockClient) CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error {
	if sc.Entries == nil {
		sc.Entries = map[string]bool{}
	}
	sc.Entries[fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name)] = true
	return nil
}
// DeleteEntry removes the entry from the dictionary of entries that mock the SPIRE server datastore
func (sc *MockClient) DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error {
	if sc.Entries != nil {
		delete(sc.Entries, fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name))
	}
	return nil
}
// VerifyStatusInternalAnnotation checks that the internal status annotations are valid by the mocked spire client
func (sc *MockClient) VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error {
	if override := sc.VerifyStatusInternalAnnotationOverride; override != nil {
		return override(ctx, tr, logger)
	}
	if forced := sc.VerifyAlwaysReturns; forced != nil {
		if *forced {
			return nil
		}
		return errors.New("failed to verify from mock VerifyAlwaysReturns")
	}
	if !sc.CheckSpireVerifiedFlag(tr) {
		return errors.New("annotation tekton.dev/not-verified = yes failed spire verification")
	}
	ann := tr.Status.Annotations
	// The controller SVID annotation must hold the mock value.
	if ann[controllerSvidAnnotation] != controllerSvid {
		return errors.New("svid annotation missing")
	}
	// Recompute the status hash and verify its mock signature.
	currentHash, err := hashTaskrunStatusInternal(tr)
	if err != nil {
		return err
	}
	if !sc.mockVerify(currentHash, ann[taskRunStatusHashSigAnnotation], "controller") {
		return errors.New("signature was not able to be verified")
	}
	// Finally compare the recomputed hash against the annotated hash.
	return CheckStatusInternalAnnotation(tr)
}
// VerifyTaskRunResults checks that all the TaskRun results are valid by the mocked spire client
func (sc *MockClient) VerifyTaskRunResults(ctx context.Context, prs []result.RunResult, tr *v1beta1.TaskRun) error {
	if sc.VerifyTaskRunResultsOverride != nil {
		return sc.VerifyTaskRunResultsOverride(ctx, prs, tr)
	}
	if sc.VerifyAlwaysReturns != nil {
		if *sc.VerifyAlwaysReturns {
			return nil
		}
		return errors.New("failed to verify from mock VerifyAlwaysReturns")
	}
	// Index the TaskRun-typed results by key.
	resultMap := map[string]result.RunResult{}
	for _, r := range prs {
		if r.ResultType == result.TaskRunResultType {
			resultMap[r.Key] = r
		}
	}
	// Get SVID identity
	var identity string
	if p, ok := resultMap[KeySVID]; ok {
		identity = p.Value
	}
	// Verify manifest
	if err := verifyManifest(resultMap); err != nil {
		return err
	}
	if identity != sc.GetIdentity(tr) {
		return errors.New("mock identity did not match")
	}
	// Verify every non-signature, non-SVID result against its signature entry.
	for key, r := range resultMap {
		if strings.HasSuffix(key, KeySignatureSuffix) || key == KeySVID {
			continue
		}
		// BUG FIX: the signature entry was previously passed to getResultValue
		// before the existence (ok) check, so a missing signature surfaced as
		// a confusing getResultValue error rather than a verification failure.
		sigEntry, ok := resultMap[key+KeySignatureSuffix]
		if !ok {
			return errors.Errorf("failed to verify field: %v", key)
		}
		sigValue, err := getResultValue(sigEntry)
		if err != nil {
			return err
		}
		resultValue, err := getResultValue(r)
		if err != nil {
			return err
		}
		if !sc.mockVerify(resultValue, sigValue, identity) {
			return errors.Errorf("failed to verify field: %v", key)
		}
	}
	return nil
}
// Sign signs and appends signatures to the RunResult based on the mocked spire client
func (sc *MockClient) Sign(ctx context.Context, results []result.RunResult) ([]result.RunResult, error) {
	if override := sc.SignOverride; override != nil {
		return override(ctx, results)
	}
	if len(sc.SignIdentities) == 0 {
		return nil, errors.New("signIdentities empty, please provide identities to sign with the MockClient.GetIdentity function")
	}
	// Dequeue the identity for this call and require a matching mock entry.
	identity := sc.SignIdentities[0]
	sc.SignIdentities = sc.SignIdentities[1:]
	if !sc.Entries[identity] {
		return nil, errors.Errorf("entry doesn't exist for identity: %v", identity)
	}
	// Output order mirrors the real client: SVID, per-result signatures,
	// manifest, manifest signature.
	signed := []result.RunResult{{
		Key:        KeySVID,
		Value:      identity,
		ResultType: result.TaskRunResultType,
	}}
	for _, r := range results {
		if r.ResultType != result.TaskRunResultType {
			continue
		}
		value, err := getResultValue(r)
		if err != nil {
			return nil, err
		}
		signed = append(signed, result.RunResult{
			Key:        r.Key + KeySignatureSuffix,
			Value:      sc.mockSign(value, identity),
			ResultType: result.TaskRunResultType,
		})
	}
	// get complete manifest of keys such that it can be verified
	manifest := getManifest(results)
	signed = append(signed,
		result.RunResult{
			Key:        KeyResultManifest,
			Value:      manifest,
			ResultType: result.TaskRunResultType,
		},
		result.RunResult{
			Key:        KeyResultManifest + KeySignatureSuffix,
			Value:      sc.mockSign(manifest, identity),
			ResultType: result.TaskRunResultType,
		})
	return signed, nil
}
// Close mock closing the spire client connection. The mock holds no external
// resources, so this is a no-op.
func (*MockClient) Close() error { return nil }
// SetConfig sets the spire configuration for MockClient; the mock ignores it.
func (*MockClient) SetConfig(spireconfig.SpireConfig) {}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"crypto"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"math/big"
"net"
"net/url"
"testing"
"time"
"github.com/go-jose/go-jose/v3"
"github.com/go-jose/go-jose/v3/cryptosigner"
"github.com/go-jose/go-jose/v3/jwt"
"github.com/spiffe/go-spiffe/v2/bundle/jwtbundle"
"github.com/spiffe/go-spiffe/v2/bundle/spiffebundle"
"github.com/spiffe/go-spiffe/v2/bundle/x509bundle"
"github.com/spiffe/go-spiffe/v2/spiffeid"
"github.com/spiffe/go-spiffe/v2/svid/jwtsvid"
"github.com/spiffe/go-spiffe/v2/svid/x509svid"
"github.com/tektoncd/pipeline/pkg/spire/test/x509util"
)
var (
	// localhostIPs lists the loopback addresses (IPv4 and IPv6) used to issue
	// localhost-valid test certificates.
	localhostIPs = []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback}
)
// CA is a test certificate authority for a SPIFFE trust domain: it mints
// X509-SVIDs, JWT-SVIDs and child CAs, failing the test on any error.
type CA struct {
	tb     testing.TB
	td     spiffeid.TrustDomain
	parent *CA // nil for the root CA
	cert   *x509.Certificate
	key    crypto.Signer
	jwtKey crypto.Signer // ES256 key used to sign JWT-SVIDs
	jwtKid string        // key ID advertised for jwtKey
}
// CertificateOption mutates an x509.Certificate template before it is signed.
type CertificateOption interface {
	apply(*x509.Certificate)
}

// certificateOption adapts a plain function to the CertificateOption interface.
type certificateOption func(*x509.Certificate)

func (co certificateOption) apply(c *x509.Certificate) {
	co(c)
}
// NewCA creates a self-signed root CA for trust domain td with a fresh
// EC-P256 JWT signing key and key ID.
func NewCA(tb testing.TB, td spiffeid.TrustDomain) *CA {
	rootCert, rootKey := CreateCACertificate(tb, nil, nil)
	return &CA{
		tb:     tb,
		td:     td,
		cert:   rootCert,
		key:    rootKey,
		jwtKey: NewEC256Key(tb),
		jwtKid: NewKeyID(tb),
	}
}
// ChildCA mints an intermediate CA signed by the receiver, with its own fresh
// JWT key and key ID.
func (ca *CA) ChildCA(options ...CertificateOption) *CA {
	childCert, childKey := CreateCACertificate(ca.tb, ca.cert, ca.key, options...)
	return &CA{
		tb:     ca.tb,
		parent: ca,
		cert:   childCert,
		key:    childKey,
		jwtKey: NewEC256Key(ca.tb),
		jwtKid: NewKeyID(ca.tb),
	}
}
// CreateX509SVID mints an X509-SVID for id, with the leaf followed by the
// intermediate chain (root excluded).
func (ca *CA) CreateX509SVID(id spiffeid.ID, options ...CertificateOption) *x509svid.SVID {
	leaf, leafKey := CreateX509SVID(ca.tb, ca.cert, ca.key, id, options...)
	certs := append([]*x509.Certificate{leaf}, ca.chain(false)...)
	return &x509svid.SVID{
		ID:           id,
		Certificates: certs,
		PrivateKey:   leafKey,
	}
}
// CreateX509Certificate mints a plain (non-SVID) certificate signed by the
// CA, returned with its chain (root excluded) and private key.
func (ca *CA) CreateX509Certificate(options ...CertificateOption) ([]*x509.Certificate, crypto.Signer) {
	leaf, leafKey := CreateX509Certificate(ca.tb, ca.cert, ca.key, options...)
	return append([]*x509.Certificate{leaf}, ca.chain(false)...), leafKey
}
// CreateJWTSVID mints a JWT-SVID for id with the given audience, signed by
// the CA's ES256 JWT key and valid for one hour from now.
func (ca *CA) CreateJWTSVID(id spiffeid.ID, audience []string) *jwtsvid.SVID {
	claims := jwt.Claims{
		Subject:  id.String(),
		Issuer:   "FAKECA",
		Audience: audience,
		IssuedAt: jwt.NewNumericDate(time.Now()),
		Expiry:   jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}
	jwtSigner, err := jose.NewSigner(
		jose.SigningKey{
			Algorithm: jose.ES256,
			Key: jose.JSONWebKey{
				Key:   cryptosigner.Opaque(ca.jwtKey),
				KeyID: ca.jwtKid,
			},
		},
		new(jose.SignerOptions).WithType("JWT"),
	)
	if err != nil {
		// BUG FIX: the three fatal messages below were identical copy-pasted
		// text ("failed to convert claims to Struct"); each now names the
		// step that actually failed.
		ca.tb.Fatalf("failed to create JWT signer: %v", err)
	}
	signedToken, err := jwt.Signed(jwtSigner).Claims(claims).CompactSerialize()
	if err != nil {
		ca.tb.Fatalf("failed to sign and serialize JWT claims: %v", err)
	}
	svid, err := jwtsvid.ParseInsecure(signedToken, audience)
	if err != nil {
		ca.tb.Fatalf("failed to parse signed JWT-SVID: %v", err)
	}
	return svid
}
// X509Authorities returns the root certificate at the top of the CA chain.
func (ca *CA) X509Authorities() []*x509.Certificate {
	c := ca
	for c.parent != nil {
		c = c.parent
	}
	return []*x509.Certificate{c.cert}
}
// JWTAuthorities returns this CA's JWT public key keyed by its key ID.
func (ca *CA) JWTAuthorities() map[string]crypto.PublicKey {
	authorities := make(map[string]crypto.PublicKey, 1)
	authorities[ca.jwtKid] = ca.jwtKey.Public()
	return authorities
}
// Bundle returns a SPIFFE bundle for the trust domain containing both the
// X.509 and JWT authorities.
func (ca *CA) Bundle() *spiffebundle.Bundle {
	b := spiffebundle.New(ca.td)
	b.SetX509Authorities(ca.X509Authorities())
	b.SetJWTAuthorities(ca.JWTAuthorities())
	return b
}
// X509Bundle returns the trust domain's X.509 bundle built from the root CA.
func (ca *CA) X509Bundle() *x509bundle.Bundle {
	return x509bundle.FromX509Authorities(ca.td, ca.X509Authorities())
}
// JWTBundle returns the trust domain's JWT bundle built from this CA's JWT key.
func (ca *CA) JWTBundle() *jwtbundle.Bundle {
	return jwtbundle.FromJWTAuthorities(ca.td, ca.JWTAuthorities())
}
// CreateCACertificate creates a one-hour CA certificate signed by parent, or
// self-signed when parent is nil, returning it with its EC-P256 key.
func CreateCACertificate(tb testing.TB, parent *x509.Certificate, parentKey crypto.Signer, options ...CertificateOption) (*x509.Certificate, crypto.Signer) {
	serial := NewSerial(tb)
	caKey := NewEC256Key(tb)
	notBefore := time.Now()
	tmpl := &x509.Certificate{
		SerialNumber: serial,
		Subject: pkix.Name{
			CommonName: fmt.Sprintf("CA %x", serial),
		},
		BasicConstraintsValid: true,
		IsCA:                  true,
		NotBefore:             notBefore,
		NotAfter:              notBefore.Add(time.Hour),
	}
	applyOptions(tmpl, options...)
	// Self-sign when no parent was supplied.
	if parent == nil {
		parent, parentKey = tmpl, caKey
	}
	return CreateCertificate(tb, tmpl, parent, caKey.Public(), parentKey), caKey
}
// CreateX509Certificate creates a one-hour leaf certificate (digital
// signature key usage) signed by parent, returning it with its EC-P256 key.
func CreateX509Certificate(tb testing.TB, parent *x509.Certificate, parentKey crypto.Signer, options ...CertificateOption) (*x509.Certificate, crypto.Signer) {
	serial := NewSerial(tb)
	leafKey := NewEC256Key(tb)
	notBefore := time.Now()
	tmpl := &x509.Certificate{
		SerialNumber: serial,
		Subject: pkix.Name{
			CommonName: fmt.Sprintf("X509-Certificate %x", serial),
		},
		NotBefore: notBefore,
		NotAfter:  notBefore.Add(time.Hour),
		KeyUsage:  x509.KeyUsageDigitalSignature,
	}
	applyOptions(tmpl, options...)
	return CreateCertificate(tb, tmpl, parent, leafKey.Public(), parentKey), leafKey
}
// CreateX509SVID creates an X509-SVID leaf certificate for the given SPIFFE
// ID, signed by the supplied parent CA. SVID-specific options are appended
// after any caller-supplied options so they take precedence.
func CreateX509SVID(tb testing.TB, parent *x509.Certificate, parentKey crypto.Signer, id spiffeid.ID, options ...CertificateOption) (*x509.Certificate, crypto.Signer) {
	serialNumber := NewSerial(tb)
	svidOptions := []CertificateOption{
		WithSerial(serialNumber),
		WithKeyUsage(x509.KeyUsageDigitalSignature),
		WithSubject(pkix.Name{
			CommonName: fmt.Sprintf("X509-SVID %x", serialNumber),
		}),
		WithURIs(id.URL()),
	}
	return CreateX509Certificate(tb, parent, parentKey, append(options, svidOptions...)...)
}
func CreateCertificate(tb testing.TB, tmpl, parent *x509.Certificate, pub, priv interface{}) *x509.Certificate {
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, pub, priv)
if err != nil {
tb.Fatalf("failed to create listener: %v", err)
}
cert, err := x509.ParseCertificate(certDER)
if err != nil {
tb.Fatalf("failed to create listener: %v", err)
}
return cert
}
// CreateWebCredentials builds throwaway web-PKI credentials: a self-signed
// root CA plus a leaf TLS certificate for the localhost IPs. It returns the
// root pool (for clients) and the server certificate.
func CreateWebCredentials(t testing.TB) (*x509.CertPool, *tls.Certificate) {
	rootCert, rootKey := CreateCACertificate(t, nil, nil)
	leafCert, leafKey := CreateX509Certificate(t, rootCert, rootKey,
		WithIPAddresses(localhostIPs...))
	pool := x509util.NewCertPool([]*x509.Certificate{rootCert})
	serverCert := &tls.Certificate{
		Certificate: [][]byte{leafCert.Raw},
		PrivateKey:  leafKey,
	}
	return pool, serverCert
}
// NewSerial returns a random 64-bit certificate serial number, failing the
// test if random bytes cannot be read.
func NewSerial(tb testing.TB) *big.Int {
	b := make([]byte, 8)
	_, err := rand.Read(b)
	if err != nil {
		// Previously reported "failed to create listener" — a copy-paste
		// mistake unrelated to what this function does.
		tb.Fatalf("failed to generate serial number: %v", err)
	}
	return new(big.Int).SetBytes(b)
}
// WithSerial returns an option that sets the certificate serial number.
func WithSerial(serial *big.Int) CertificateOption {
	return certificateOption(func(tmpl *x509.Certificate) {
		tmpl.SerialNumber = serial
	})
}
// WithKeyUsage returns an option that sets the certificate key usage bits.
func WithKeyUsage(keyUsage x509.KeyUsage) CertificateOption {
	return certificateOption(func(tmpl *x509.Certificate) {
		tmpl.KeyUsage = keyUsage
	})
}
// WithLifetime returns an option that sets the certificate validity window.
func WithLifetime(notBefore, notAfter time.Time) CertificateOption {
	return certificateOption(func(tmpl *x509.Certificate) {
		tmpl.NotBefore = notBefore
		tmpl.NotAfter = notAfter
	})
}
// WithIPAddresses returns an option that sets the certificate IP SANs.
func WithIPAddresses(ips ...net.IP) CertificateOption {
	return certificateOption(func(tmpl *x509.Certificate) {
		tmpl.IPAddresses = ips
	})
}
// WithURIs returns an option that sets the certificate URI SANs.
func WithURIs(uris ...*url.URL) CertificateOption {
	return certificateOption(func(tmpl *x509.Certificate) {
		tmpl.URIs = uris
	})
}
// WithSubject returns an option that sets the certificate subject.
func WithSubject(subject pkix.Name) CertificateOption {
	return certificateOption(func(tmpl *x509.Certificate) {
		tmpl.Subject = subject
	})
}
// applyOptions applies each certificate option to the template, in order.
func applyOptions(c *x509.Certificate, options ...CertificateOption) {
	for _, option := range options {
		option.apply(c)
	}
}
// chain returns this CA's certificate chain from leaf CA upward. The
// self-signed root (the node with no parent) is included only when
// includeRoot is true.
func (ca *CA) chain(includeRoot bool) []*x509.Certificate {
	certs := []*x509.Certificate{}
	for node := ca; node != nil; node = node.parent {
		if node.parent == nil && !includeRoot {
			continue
		}
		certs = append(certs, node.cert)
	}
	return certs
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fakebundleendpoint
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"sync"
"testing"
"github.com/spiffe/go-spiffe/v2/bundle/spiffebundle"
"github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig"
"github.com/spiffe/go-spiffe/v2/svid/x509svid"
"github.com/tektoncd/pipeline/pkg/spire/test"
"github.com/tektoncd/pipeline/pkg/spire/test/x509util"
)
// Server is a fake SPIFFE bundle endpoint server for tests. It serves the
// configured bundles over TLS from a random loopback port; each GET of
// /test-bundle consumes and returns the next bundle in order.
type Server struct {
tb testing.TB
// wg tracks the background serve goroutine so Shutdown can wait for it.
wg sync.WaitGroup
// addr is the listener address assigned at start time.
addr net.Addr
httpServer *http.Server
// Root certificates used by clients to verify server certificates.
rootCAs *x509.CertPool
// TLS configuration used by the server.
tlscfg *tls.Config
// SPIFFE bundles that can be returned by this Server, served one per
// request in FIFO order.
bundles []*spiffebundle.Bundle
}
// ServerOption configures a Server while it is being constructed by New.
type ServerOption interface {
apply(*Server)
}
// New builds and starts a fake bundle endpoint server. Options run after the
// default web credentials are installed, so they may replace the TLS config
// or preload bundles. The test fails if the server cannot start.
func New(tb testing.TB, option ...ServerOption) *Server {
	rootCAs, cert := test.CreateWebCredentials(tb)
	s := &Server{
		tb:      tb,
		rootCAs: rootCAs,
		tlscfg: &tls.Config{
			Certificates: []tls.Certificate{*cert},
		},
	}
	for _, o := range option {
		o.apply(s)
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/test-bundle", s.testbundle)
	s.httpServer = &http.Server{
		Handler:   mux,
		TLSConfig: s.tlscfg,
	}
	if err := s.start(); err != nil {
		tb.Fatalf("Failed to start: %v", err)
	}
	return s
}
// Shutdown gracefully stops the HTTP server and waits for the background
// serve goroutine to exit.
func (s *Server) Shutdown() {
	if err := s.httpServer.Shutdown(context.Background()); err != nil {
		s.tb.Errorf("unexpected error: %v", err)
	}
	s.wg.Wait()
}
// Addr returns the host:port the server is listening on.
func (s *Server) Addr() string {
	return s.addr.String()
}
// FetchBundleURL returns the HTTPS URL of the test bundle endpoint.
func (s *Server) FetchBundleURL() string {
	return fmt.Sprintf("https://%s/test-bundle", s.Addr())
}
// RootCAs returns the pool clients should use to verify this server's TLS
// certificate.
func (s *Server) RootCAs() *x509.CertPool {
	return s.rootCAs
}
// start listens on a random loopback port and serves TLS in the background.
// The serve goroutine reports any exit result other than
// http.ErrServerClosed, which is the expected outcome of Shutdown.
func (s *Server) start() error {
	ln, err := net.Listen("tcp", "127.0.0.1:")
	if err != nil {
		return err
	}
	s.addr = ln.Addr()
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		defer ln.Close()
		err := s.httpServer.ServeTLS(ln, "", "")
		// BUG FIX: the condition was `err != nil || ...`, which flagged an
		// error even when ServeTLS returned the expected ErrServerClosed
		// (and would have called Error() on a nil error otherwise).
		if err == nil || err.Error() != http.ErrServerClosed.Error() {
			s.tb.Errorf("expected error %q, got %v", http.ErrServerClosed.Error(), err)
		}
	}()
	return nil
}
// testbundle serves the next configured bundle as JSON, returning 404 when
// no bundles remain.
func (s *Server) testbundle(w http.ResponseWriter, r *http.Request) {
	if len(s.bundles) == 0 {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	next := s.bundles[0]
	s.bundles = s.bundles[1:]
	body, err := next.Marshal()
	if err != nil {
		s.tb.Errorf("unexpected error: %v", err)
	}
	w.Header().Add("Content-Type", "application/json")
	n, err := w.Write(body)
	if err != nil {
		s.tb.Errorf("unexpected error: %v", err)
	}
	if n != len(body) {
		s.tb.Errorf("expected written bytes %d, got %d", len(body), n)
	}
}
type serverOption func(*Server)
// WithTestBundles sets the bundles that are returned by the Bundle Endpoint.
// Several bundles may be specified; they are returned one at a time, in
// order, each time a client GETs a bundle.
func WithTestBundles(bundles ...*spiffebundle.Bundle) ServerOption {
	return serverOption(func(srv *Server) {
		srv.bundles = bundles
	})
}
// WithSPIFFEAuth serves TLS with the given X509-SVID instead of web
// credentials, and installs the bundle's X.509 authorities as the root pool
// clients should trust.
func WithSPIFFEAuth(bundle *spiffebundle.Bundle, svid *x509svid.SVID) ServerOption {
	return serverOption(func(srv *Server) {
		srv.rootCAs = x509util.NewCertPool(bundle.X509Authorities())
		srv.tlscfg = tlsconfig.TLSServerConfig(svid)
	})
}
// apply invokes the option function on the server, satisfying ServerOption.
func (so serverOption) apply(s *Server) {
so(s)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fakeworkloadapi
import (
"context"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"sync"
"testing"
"github.com/spiffe/go-spiffe/v2/bundle/jwtbundle"
"github.com/spiffe/go-spiffe/v2/bundle/x509bundle"
"github.com/spiffe/go-spiffe/v2/proto/spiffe/workload"
"github.com/spiffe/go-spiffe/v2/svid/jwtsvid"
"github.com/spiffe/go-spiffe/v2/svid/x509svid"
"github.com/tektoncd/pipeline/pkg/spire/test/pemutil"
"github.com/tektoncd/pipeline/pkg/spire/test/x509util"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/types/known/structpb"
)
var noIdentityError = status.Error(codes.PermissionDenied, "no identity issued")
// WorkloadAPI is a fake SPIFFE Workload API gRPC server for tests.
// Responses are configured via the Set* methods; open watch streams are
// notified of updates through the registered channels.
type WorkloadAPI struct {
tb testing.TB
// wg tracks the background gRPC serve goroutine so Stop can wait for it.
wg sync.WaitGroup
// addr is the gRPC target string of the listener.
addr string
server *grpc.Server
// mu guards the response and channel fields below.
mu sync.Mutex
x509Resp *workload.X509SVIDResponse
x509Chans map[chan *workload.X509SVIDResponse]struct{}
jwtResp *workload.JWTSVIDResponse
jwtBundlesResp *workload.JWTBundlesResponse
jwtBundlesChans map[chan *workload.JWTBundlesResponse]struct{}
x509BundlesResp *workload.X509BundlesResponse
x509BundlesChans map[chan *workload.X509BundlesResponse]struct{}
}
// New starts a fake Workload API gRPC server on a random loopback port and
// returns it. Stop must be called to shut the server down.
func New(tb testing.TB) *WorkloadAPI {
	w := &WorkloadAPI{
		// BUG FIX: tb was never stored, so w.tb was nil and the Set*
		// methods (e.g. SetJWTBundles calling w.tb.Fatalf) would panic.
		tb:               tb,
		x509Chans:        make(map[chan *workload.X509SVIDResponse]struct{}),
		jwtBundlesChans:  make(map[chan *workload.JWTBundlesResponse]struct{}),
		x509BundlesChans: make(map[chan *workload.X509BundlesResponse]struct{}),
	}
	listener, err := newListener()
	if err != nil {
		tb.Fatalf("failed to create listener: %v", err)
	}
	server := grpc.NewServer()
	workload.RegisterSpiffeWorkloadAPIServer(server, &workloadAPIWrapper{w: w})
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		_ = server.Serve(listener)
	}()
	w.addr = getTargetName(listener.Addr())
	tb.Logf("WorkloadAPI address: %s", w.addr)
	w.server = server
	return w
}
// Stop shuts down the gRPC server and waits for the serve goroutine to exit.
func (w *WorkloadAPI) Stop() {
	w.server.Stop()
	w.wg.Wait()
}
// Addr returns the gRPC target string of the fake server.
func (w *WorkloadAPI) Addr() string {
	return w.addr
}
// SetX509SVIDResponse sets the X509-SVID response (nil clears it) and pushes
// it to every open FetchX509SVID stream.
func (w *WorkloadAPI) SetX509SVIDResponse(r *X509SVIDResponse) {
var resp *workload.X509SVIDResponse
if r != nil {
resp = r.ToProto(w.tb)
}
w.mu.Lock()
defer w.mu.Unlock()
w.x509Resp = resp
for ch := range w.x509Chans {
select {
case ch <- resp:
default:
// The channel already holds an undelivered update; drop it so the
// newest response wins.
<-ch
ch <- resp
}
}
}
// SetJWTSVIDResponse sets the response returned by FetchJWTSVID. A nil
// response is ignored, leaving any previous response in place.
func (w *WorkloadAPI) SetJWTSVIDResponse(r *workload.JWTSVIDResponse) {
	if r == nil {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	w.jwtResp = r
}
// SetJWTBundles sets the JWT bundles (keyed by trust domain) and pushes the
// new response to every open FetchJWTBundles stream.
func (w *WorkloadAPI) SetJWTBundles(jwtBundles ...*jwtbundle.Bundle) {
resp := &workload.JWTBundlesResponse{
Bundles: make(map[string][]byte),
}
for _, bundle := range jwtBundles {
bundleBytes, err := bundle.Marshal()
if err != nil {
w.tb.Fatalf("failed to marshal JWT bundle: %v", err)
}
resp.Bundles[bundle.TrustDomain().String()] = bundleBytes
}
w.mu.Lock()
defer w.mu.Unlock()
w.jwtBundlesResp = resp
for ch := range w.jwtBundlesChans {
select {
case ch <- w.jwtBundlesResp:
default:
// The channel already holds an undelivered update; drop it so the
// newest response wins.
<-ch
ch <- w.jwtBundlesResp
}
}
}
// SetX509Bundles sets the X.509 bundles (keyed by trust domain, encoded as
// concatenated DER) and pushes the new response to every open
// FetchX509Bundles stream.
func (w *WorkloadAPI) SetX509Bundles(x509Bundles ...*x509bundle.Bundle) {
resp := &workload.X509BundlesResponse{
Bundles: make(map[string][]byte),
}
for _, bundle := range x509Bundles {
// Bundle.Marshal yields PEM; the Workload API carries raw DER, so
// re-parse and concatenate the certificates' raw bytes.
bundleBytes, err := bundle.Marshal()
if err != nil {
w.tb.Fatalf("failed to marshal X509 bundle: %v", err)
}
bundlePem, err := pemutil.ParseCertificates(bundleBytes)
if err != nil {
w.tb.Fatalf("failed to parse certificates: %v", err)
}
var rawBytes []byte
for _, c := range bundlePem {
rawBytes = append(rawBytes, c.Raw...)
}
resp.Bundles[bundle.TrustDomain().String()] = rawBytes
}
w.mu.Lock()
defer w.mu.Unlock()
w.x509BundlesResp = resp
for ch := range w.x509BundlesChans {
select {
case ch <- w.x509BundlesResp:
default:
// The channel already holds an undelivered update; drop it so the
// newest response wins.
<-ch
ch <- w.x509BundlesResp
}
}
}
// workloadAPIWrapper adapts WorkloadAPI to the generated gRPC service
// interface, delegating each RPC to the fake implementation.
type workloadAPIWrapper struct {
workload.UnimplementedSpiffeWorkloadAPIServer
w *WorkloadAPI
}
// FetchX509SVID delegates to the fake WorkloadAPI implementation.
func (w *workloadAPIWrapper) FetchX509SVID(req *workload.X509SVIDRequest, stream workload.SpiffeWorkloadAPI_FetchX509SVIDServer) error {
return w.w.fetchX509SVID(req, stream)
}

// FetchX509Bundles delegates to the fake WorkloadAPI implementation.
func (w *workloadAPIWrapper) FetchX509Bundles(req *workload.X509BundlesRequest, stream workload.SpiffeWorkloadAPI_FetchX509BundlesServer) error {
return w.w.fetchX509Bundles(req, stream)
}

// FetchJWTSVID delegates to the fake WorkloadAPI implementation.
func (w *workloadAPIWrapper) FetchJWTSVID(ctx context.Context, req *workload.JWTSVIDRequest) (*workload.JWTSVIDResponse, error) {
return w.w.fetchJWTSVID(ctx, req)
}

// FetchJWTBundles delegates to the fake WorkloadAPI implementation.
func (w *workloadAPIWrapper) FetchJWTBundles(req *workload.JWTBundlesRequest, stream workload.SpiffeWorkloadAPI_FetchJWTBundlesServer) error {
return w.w.fetchJWTBundles(req, stream)
}

// ValidateJWTSVID delegates to the fake WorkloadAPI implementation.
func (w *workloadAPIWrapper) ValidateJWTSVID(ctx context.Context, req *workload.ValidateJWTSVIDRequest) (*workload.ValidateJWTSVIDResponse, error) {
return w.w.validateJWTSVID(ctx, req)
}
// X509SVIDResponse is the test-friendly form of an X509-SVID response; it is
// converted to the wire format with ToProto.
type X509SVIDResponse struct {
// SVIDs to return, one workload.X509SVID entry each.
SVIDs []*x509svid.SVID
// Bundle for the trust domain, attached to every SVID entry.
Bundle *x509bundle.Bundle
// FederatedBundles are keyed in the proto by their trust domain ID.
FederatedBundles []*x509bundle.Bundle
}
// ToProto converts the response to its protobuf wire representation, failing
// the test if a private key cannot be marshaled to PKCS#8.
func (r *X509SVIDResponse) ToProto(tb testing.TB) *workload.X509SVIDResponse {
	var bundleDER []byte
	if r.Bundle != nil {
		bundleDER = x509util.ConcatRawCertsFromCerts(r.Bundle.X509Authorities())
	}
	pb := &workload.X509SVIDResponse{
		FederatedBundles: make(map[string][]byte),
	}
	for _, svid := range r.SVIDs {
		var keyDER []byte
		if svid.PrivateKey != nil {
			der, err := x509.MarshalPKCS8PrivateKey(svid.PrivateKey)
			if err != nil {
				tb.Fatalf("failed to marshal private key: %v", err)
			}
			keyDER = der
		}
		pb.Svids = append(pb.Svids, &workload.X509SVID{
			SpiffeId:    svid.ID.String(),
			X509Svid:    x509util.ConcatRawCertsFromCerts(svid.Certificates),
			X509SvidKey: keyDER,
			Bundle:      bundleDER,
		})
	}
	for _, fb := range r.FederatedBundles {
		pb.FederatedBundles[fb.TrustDomain().IDString()] = x509util.ConcatRawCertsFromCerts(fb.X509Authorities())
	}
	return pb
}
// fetchX509SVID streams X509-SVID responses: the current response is sent
// immediately, then every update pushed by SetX509SVIDResponse, until the
// stream context is done. A nil response yields noIdentityError.
func (w *WorkloadAPI) fetchX509SVID(_ *workload.X509SVIDRequest, stream workload.SpiffeWorkloadAPI_FetchX509SVIDServer) error {
if err := checkHeader(stream.Context()); err != nil {
return err
}
// Register the update channel and snapshot the current response under the
// same lock so no update between snapshot and registration is lost.
ch := make(chan *workload.X509SVIDResponse, 1)
w.mu.Lock()
w.x509Chans[ch] = struct{}{}
resp := w.x509Resp
w.mu.Unlock()
defer func() {
w.mu.Lock()
delete(w.x509Chans, ch)
w.mu.Unlock()
}()
sendResp := func(resp *workload.X509SVIDResponse) error {
if resp == nil {
return noIdentityError
}
return stream.Send(resp)
}
if err := sendResp(resp); err != nil {
return err
}
for {
select {
case resp := <-ch:
if err := sendResp(resp); err != nil {
return err
}
case <-stream.Context().Done():
return stream.Context().Err()
}
}
}
// fetchX509Bundles streams X.509 bundle responses: the current response is
// sent immediately, then every update pushed by SetX509Bundles, until the
// stream context is done. A nil response yields noIdentityError.
func (w *WorkloadAPI) fetchX509Bundles(_ *workload.X509BundlesRequest, stream workload.SpiffeWorkloadAPI_FetchX509BundlesServer) error {
if err := checkHeader(stream.Context()); err != nil {
return err
}
// Register the update channel and snapshot the current response under the
// same lock so no update between snapshot and registration is lost.
ch := make(chan *workload.X509BundlesResponse, 1)
w.mu.Lock()
w.x509BundlesChans[ch] = struct{}{}
resp := w.x509BundlesResp
w.mu.Unlock()
defer func() {
w.mu.Lock()
delete(w.x509BundlesChans, ch)
w.mu.Unlock()
}()
sendResp := func(resp *workload.X509BundlesResponse) error {
if resp == nil {
return noIdentityError
}
return stream.Send(resp)
}
if err := sendResp(resp); err != nil {
return err
}
for {
select {
case resp := <-ch:
if err := sendResp(resp); err != nil {
return err
}
case <-stream.Context().Done():
return stream.Context().Err()
}
}
}
// fetchJWTSVID returns the configured JWT-SVID response after validating the
// security header and requiring at least one audience.
func (w *WorkloadAPI) fetchJWTSVID(ctx context.Context, req *workload.JWTSVIDRequest) (*workload.JWTSVIDResponse, error) {
	if err := checkHeader(ctx); err != nil {
		return nil, err
	}
	if len(req.Audience) == 0 {
		return nil, errors.New("no audience")
	}
	// Read w.jwtResp under the mutex: SetJWTSVIDResponse writes it under
	// w.mu from the test goroutine, so an unguarded read is a data race.
	w.mu.Lock()
	resp := w.jwtResp
	w.mu.Unlock()
	if resp == nil {
		return nil, noIdentityError
	}
	return resp, nil
}
// fetchJWTBundles streams JWT bundle responses: the current response is sent
// immediately, then every update pushed by SetJWTBundles, until the stream
// context is done. A nil response yields noIdentityError.
func (w *WorkloadAPI) fetchJWTBundles(_ *workload.JWTBundlesRequest, stream workload.SpiffeWorkloadAPI_FetchJWTBundlesServer) error {
if err := checkHeader(stream.Context()); err != nil {
return err
}
// Register the update channel and snapshot the current response under the
// same lock so no update between snapshot and registration is lost.
ch := make(chan *workload.JWTBundlesResponse, 1)
w.mu.Lock()
w.jwtBundlesChans[ch] = struct{}{}
resp := w.jwtBundlesResp
w.mu.Unlock()
defer func() {
w.mu.Lock()
delete(w.jwtBundlesChans, ch)
w.mu.Unlock()
}()
sendResp := func(resp *workload.JWTBundlesResponse) error {
if resp == nil {
return noIdentityError
}
return stream.Send(resp)
}
if err := sendResp(resp); err != nil {
return err
}
for {
select {
case resp := <-ch:
if err := sendResp(resp); err != nil {
return err
}
case <-stream.Context().Done():
return stream.Context().Err()
}
}
}
// validateJWTSVID parses the presented JWT-SVID (without verifying its
// signature) and returns its SPIFFE ID and claims.
func (w *WorkloadAPI) validateJWTSVID(_ context.Context, req *workload.ValidateJWTSVIDRequest) (*workload.ValidateJWTSVIDResponse, error) {
	switch {
	case req.Audience == "":
		return nil, status.Error(codes.InvalidArgument, "audience must be specified")
	case req.Svid == "":
		return nil, status.Error(codes.InvalidArgument, "svid must be specified")
	}
	// TODO: validate
	parsed, err := jwtsvid.ParseInsecure(req.Svid, []string{req.Audience})
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	claims, err := structFromValues(parsed.Claims)
	if err != nil {
		w.tb.Fatalf("failed to convert claims to Struct: %v", err)
	}
	return &workload.ValidateJWTSVIDResponse{
		SpiffeId: parsed.ID.String(),
		Claims:   claims,
	}, nil
}
// checkHeader verifies the mandatory workload.spiffe.io security header on
// the incoming request.
func checkHeader(ctx context.Context) error {
	return checkMetadata(ctx, "workload.spiffe.io", "true")
}
// checkMetadata verifies that the incoming gRPC metadata carries the given
// key with the expected value.
func checkMetadata(ctx context.Context, key, value string) error {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return errors.New("request does not contain metadata")
	}
	values := md.Get(key)
	// BUG FIX: this checked len(value) (the expected value string, always
	// non-empty here) instead of len(values), so a request missing the key
	// panicked on values[0] below instead of returning an error.
	if len(values) == 0 {
		return fmt.Errorf("request metadata does not contain %q value", key)
	}
	if values[0] != value {
		return fmt.Errorf("request metadata %q value is %q; expected %q", key, values[0], value)
	}
	return nil
}
// structFromValues converts a generic claims map into a protobuf Struct by
// round-tripping through JSON.
func structFromValues(values map[string]interface{}) (*structpb.Struct, error) {
	encoded, err := json.Marshal(values)
	if err != nil {
		return nil, err
	}
	out := new(structpb.Struct)
	if err := protojson.Unmarshal(encoded, out); err != nil {
		return nil, err
	}
	return out, nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fakeworkloadapi
import (
"fmt"
"net"
)
// newListener opens a TCP listener on a random localhost port.
func newListener() (net.Listener, error) {
	return net.Listen("tcp", "localhost:0")
}
func getTargetName(addr net.Addr) string {
return fmt.Sprintf("%s://%s", addr.Network(), addr.String())
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"strings"
"testing"
)
// Methods to generate private keys. If generation starts slowing down test
// execution then switch over to pre-generated keys.
// NewEC256Key returns an ECDSA key over the P256 curve
func NewEC256Key(tb testing.TB) *ecdsa.PrivateKey {
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
tb.Fatalf("failed to marshal private key: %v", err)
}
return key
}
// NewKeyID returns a random id useful for identifying keys
func NewKeyID(tb testing.TB) string {
	choices := make([]byte, 32)
	if _, err := rand.Read(choices); err != nil {
		// Previously reported "failed to marshal private key" — a
		// copy-paste mistake; this reads random bytes for the key ID.
		tb.Fatalf("failed to read random bytes for key ID: %v", err)
	}
	return keyIDFromBytes(choices)
}
// keyIDFromBytes maps each input byte onto a character from a 62-character
// alphanumeric alphabet, producing a key ID of the same length as the input.
func keyIDFromBytes(choices []byte) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	var b strings.Builder
	b.Grow(len(choices))
	for _, choice := range choices {
		b.WriteByte(alphabet[int(choice)%len(alphabet)])
	}
	return b.String()
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pemutil
import (
"crypto"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
)
const (
// certType is the PEM block type for X.509 certificates.
certType string = "CERTIFICATE"
// keyType is the PEM block type for PKCS#8 private keys.
keyType string = "PRIVATE KEY"
)
// ParseCertificates parses every CERTIFICATE PEM block in certsBytes,
// ignoring PEM blocks of other types.
func ParseCertificates(certsBytes []byte) ([]*x509.Certificate, error) {
	objects, err := parseBlocks(certsBytes, certType)
	if err != nil {
		return nil, err
	}
	certs := make([]*x509.Certificate, 0, len(objects))
	for _, object := range objects {
		cert, ok := object.(*x509.Certificate)
		if !ok {
			return nil, fmt.Errorf("expected *x509.Certificate; got %T", object)
		}
		certs = append(certs, cert)
	}
	return certs, nil
}
// ParsePrivateKey parses the first PRIVATE KEY PEM block in keyBytes. It
// returns a nil key (and nil error) when no key block is present.
func ParsePrivateKey(keyBytes []byte) (crypto.PrivateKey, error) {
	objects, err := parseBlocks(keyBytes, keyType)
	if err != nil || len(objects) == 0 {
		return nil, err
	}
	key, ok := objects[0].(crypto.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("expected crypto.PrivateKey; got %T", objects[0])
	}
	return key, nil
}
// EncodePKCS8PrivateKey marshals the private key as PKCS#8 DER and wraps it
// in a PRIVATE KEY PEM block.
func EncodePKCS8PrivateKey(privateKey interface{}) ([]byte, error) {
	der, err := x509.MarshalPKCS8PrivateKey(privateKey)
	if err != nil {
		return nil, err
	}
	block := &pem.Block{
		Type:  keyType,
		Bytes: der,
	}
	return pem.EncodeToMemory(block), nil
}
// EncodeCertificates renders each certificate as a CERTIFICATE PEM block and
// concatenates the results.
func EncodeCertificates(certificates []*x509.Certificate) []byte {
	pemBytes := []byte{}
	for _, cert := range certificates {
		block := pem.EncodeToMemory(&pem.Block{
			Type:  certType,
			Bytes: cert.Raw,
		})
		pemBytes = append(pemBytes, block...)
	}
	return pemBytes
}
// parseBlocks decodes every PEM block in blocksBytes, returning the parsed
// objects whose type matches expectedType. It is an error for the input to
// contain no PEM blocks at all.
func parseBlocks(blocksBytes []byte, expectedType string) ([]interface{}, error) {
	objects := []interface{}{}
	foundBlocks := false
	for len(blocksBytes) > 0 {
		object, rest, foundBlock, err := parseBlock(blocksBytes, expectedType)
		blocksBytes = rest
		foundBlocks = foundBlocks || foundBlock
		if err != nil {
			return nil, err
		}
		if object != nil {
			objects = append(objects, object)
		}
	}
	if !foundBlocks {
		return nil, errors.New("no PEM blocks found")
	}
	return objects, nil
}
// parseBlock decodes the first PEM block in pemBytes. It returns the parsed
// object when the block type matches pemType (blocks of other types are
// skipped), the remaining undecoded bytes, whether any block was found, and
// any parse error.
func parseBlock(pemBytes []byte, pemType string) (interface{}, []byte, bool, error) {
	block, rest := pem.Decode(pemBytes)
	switch {
	case block == nil:
		// No (further) PEM data in the input.
		return nil, nil, false, nil
	case block.Type != pemType:
		// A block of a different type: report it found but yield no object.
		return nil, rest, true, nil
	}
	var (
		object interface{}
		err    error
	)
	switch pemType {
	case certType:
		object, err = x509.ParseCertificate(block.Bytes)
	case keyType:
		object, err = x509.ParsePKCS8PrivateKey(block.Bytes)
	default:
		err = fmt.Errorf("PEM type not supported: %q", pemType)
	}
	if err != nil {
		return nil, nil, false, err
	}
	return object, rest, true, nil
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package x509util
import (
"crypto/x509"
)
// NewCertPool returns a new CertPool with the given X.509 certificates
func NewCertPool(certs []*x509.Certificate) *x509.CertPool {
pool := x509.NewCertPool()
for _, cert := range certs {
pool.AddCert(cert)
}
return pool
}
// CopyX509Authorities copies a slice of X.509 certificates to a new slice.
func CopyX509Authorities(x509Authorities []*x509.Certificate) []*x509.Certificate {
copiedX509Authorities := make([]*x509.Certificate, len(x509Authorities))
copy(copiedX509Authorities, x509Authorities)
return copiedX509Authorities
}
// CertsEqual returns true if the slices of X.509 certificates are equal.
func CertsEqual(a, b []*x509.Certificate) bool {
if len(a) != len(b) {
return false
}
for i, cert := range a {
if !cert.Equal(b[i]) {
return false
}
}
return true
}
func RawCertsFromCerts(certs []*x509.Certificate) [][]byte {
rawCerts := make([][]byte, 0, len(certs))
for _, cert := range certs {
rawCerts = append(rawCerts, cert.Raw)
}
return rawCerts
}
func ConcatRawCertsFromCerts(certs []*x509.Certificate) []byte {
var rawCerts []byte
for _, cert := range certs {
rawCerts = append(rawCerts, cert.Raw...)
}
return rawCerts
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spire
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/spiffe/go-spiffe/v2/workloadapi"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/result"
"go.uber.org/zap"
)
// VerifyTaskRunResults ensures that the TaskRun results are valid and have not been tampered with
func (sc *spireControllerAPIClient) VerifyTaskRunResults(ctx context.Context, prs []result.RunResult, tr *v1beta1.TaskRun) error {
err := sc.setupClient(ctx)
if err != nil {
return err
}
// Collect only the task-run results; other result types are not verified here.
resultMap := map[string]result.RunResult{}
for _, r := range prs {
if r.ResultType == result.TaskRunResultType {
resultMap[r.Key] = r
}
}
// The SVID result carries the certificate whose key signed the results.
cert, err := getSVID(resultMap)
if err != nil {
return err
}
trust, err := getTrustBundle(ctx, sc.workloadAPI)
if err != nil {
return err
}
// Every result named in the manifest must be present.
if err := verifyManifest(resultMap); err != nil {
return err
}
// The SVID URI must identify this TaskRun within the trust domain.
if err := verifyCertURI(cert, tr, sc.config.TrustDomain); err != nil {
return err
}
// The SVID must chain to the SPIRE trust bundle.
if err := verifyCertificateTrust(cert, trust); err != nil {
return err
}
// Verify the signature over each result value; signature entries and the
// SVID itself carry no accompanying signature of their own.
for key := range resultMap {
if strings.HasSuffix(key, KeySignatureSuffix) {
continue
}
if key == KeySVID {
continue
}
if err := verifyResult(cert.PublicKey, key, resultMap); err != nil {
return err
}
}
return nil
}
// VerifyStatusInternalAnnotation runs multiple verification steps to ensure
// that the SPIRE status annotations are valid: the controller SVID must
// parse, chain to the SPIRE trust bundle, and its signature over the status
// hash must verify; finally the stored hash must match the current status.
func (sc *spireControllerAPIClient) VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error {
	err := sc.setupClient(ctx)
	if err != nil {
		return err
	}
	if !sc.CheckSpireVerifiedFlag(tr) {
		return errors.New("annotation tekton.dev/not-verified = yes failed spire verification")
	}
	annotations := tr.Status.Annotations
	// get trust bundle from spire server
	trust, err := getTrustBundle(ctx, sc.workloadAPI)
	if err != nil {
		return err
	}
	// verify controller SVID
	svid, ok := annotations[controllerSvidAnnotation]
	if !ok {
		return errors.New("No SVID found")
	}
	block, _ := pem.Decode([]byte(svid))
	if block == nil {
		// BUG FIX: this path previously wrapped a stale/nil err with %w,
		// producing a misleading message; pem.Decode signals failure only
		// by returning a nil block.
		return errors.New("invalid SVID: unable to decode PEM block")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return fmt.Errorf("invalid SVID: %w", err)
	}
	// verify certificate root of trust
	if err := verifyCertificateTrust(cert, trust); err != nil {
		return err
	}
	logger.Infof("Successfully verified certificate %s against SPIRE", svid)
	if err := verifyAnnotation(cert.PublicKey, annotations); err != nil {
		return err
	}
	logger.Info("Successfully verified signature")
	// CheckStatusInternalAnnotation check current status hash vs annotation status hash by controller
	if err := CheckStatusInternalAnnotation(tr); err != nil {
		return err
	}
	logger.Info("Successfully verified status annotation hash matches the current taskrun status")
	return nil
}
// CheckSpireVerifiedFlag checks if the verified status annotation is set which would result in spire verification failed
func (sc *spireControllerAPIClient) CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool {
	// The presence of the annotation marks the TaskRun as NOT verified.
	_, notVerified := tr.Status.Annotations[VerifiedAnnotation]
	return !notVerified
}
// hashTaskrunStatusInternal returns the SHA-256 hex digest of the JSON
// serialization of the TaskRun's status fields.
func hashTaskrunStatusInternal(tr *v1beta1.TaskRun) (string, error) {
	serialized, err := json.Marshal(tr.Status.TaskRunStatusFields)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(serialized)
	return fmt.Sprintf("%x", sum), nil
}
// CheckStatusInternalAnnotation ensures that the internal status annotation hash and current status hash match
func CheckStatusInternalAnnotation(tr *v1beta1.TaskRun) error {
	// get stored hash of status
	stored, ok := tr.Status.Annotations[TaskRunStatusHashAnnotation]
	if !ok {
		return fmt.Errorf("no annotation status hash found for %s", TaskRunStatusHashAnnotation)
	}
	// get current hash of status
	current, err := hashTaskrunStatusInternal(tr)
	if err != nil {
		return err
	}
	if stored != current {
		return fmt.Errorf("current status hash and stored annotation hash does not match! Annotation Hash: %s, Current Status Hash: %s", stored, current)
	}
	return nil
}
// getSVID extracts and parses the SVID certificate from the result map.
func getSVID(resultMap map[string]result.RunResult) (*x509.Certificate, error) {
	svid, ok := resultMap[KeySVID]
	if !ok {
		return nil, errors.New("no SVID found")
	}
	svidValue, err := getResultValue(svid)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode([]byte(svidValue))
	if block == nil {
		// BUG FIX: this path previously wrapped a nil err with %w, producing
		// a misleading message; pem.Decode signals failure only by
		// returning a nil block.
		return nil, errors.New("invalid SVID: unable to decode PEM block")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, fmt.Errorf("invalid SVID: %w", err)
	}
	return cert, nil
}
// getTrustBundle fetches the X.509 bundle set from the SPIRE workload API
// and flattens every trust domain's authorities into a single cert pool.
func getTrustBundle(ctx context.Context, client *workloadapi.Client) (*x509.CertPool, error) {
	x509set, err := client.FetchX509Bundles(ctx)
	if err != nil {
		return nil, err
	}
	bundles := x509set.Bundles()
	if len(bundles) == 0 {
		// BUG FIX: this used errors.Wrap(err, ...) with a nil err, which
		// returns nil — the empty-bundle case reported success with a nil
		// pool. Return a real error instead.
		return nil, errors.New("trust domain bundle empty")
	}
	trustPool := x509.NewCertPool()
	for _, bundle := range bundles {
		// BUG FIX: the original returned inside this loop after the first
		// bundle, silently dropping the authorities of any further trust
		// domains.
		for _, c := range bundle.X509Authorities() {
			trustPool.AddCert(c)
		}
	}
	return trustPool, nil
}
// getFullPath returns the SPIFFE ID path for the TaskRun, e.g.
// "/ns/default/taskrun/cache-image-pipelinerun-r4r22-fetch-from-git".
func getFullPath(tr *v1beta1.TaskRun) string {
	return "/ns/" + tr.Namespace + "/taskrun/" + tr.Name
}
// verifyCertURI checks that the certificate carries exactly one SPIFFE URI
// whose host matches the trust domain and whose path identifies the TaskRun.
func verifyCertURI(cert *x509.Certificate, tr *v1beta1.TaskRun, trustDomain string) error {
	expectedPath := getFullPath(tr)
	if len(cert.URIs) == 0 {
		return fmt.Errorf("cert uri missing for taskrun: %s", tr.Name)
	}
	if len(cert.URIs) > 1 {
		return fmt.Errorf("cert contains more than one URI for taskrun: %s", tr.Name)
	}
	uri := cert.URIs[0]
	if uri.Host != trustDomain {
		return fmt.Errorf("cert uri: %s does not match trust domain: %s", uri.Host, trustDomain)
	}
	if uri.Path != expectedPath {
		return fmt.Errorf("cert uri: %s does not match taskrun: %s", uri.Path, expectedPath)
	}
	return nil
}
func verifyCertificateTrust(cert *x509.Certificate, rootCertPool *x509.CertPool) error {
verifyOptions := x509.VerifyOptions{
Roots: rootCertPool,
}
chains, err := cert.Verify(verifyOptions)
if len(chains) == 0 || err != nil {
return errors.New("cert cannot be verified by provided roots")
}
return nil
}
// verifyManifest checks that every result named in the comma-separated
// manifest result is present in the results map.
func verifyManifest(results map[string]result.RunResult) error {
	manifest, ok := results[KeyResultManifest]
	if !ok {
		return errors.New("no manifest found in results")
	}
	manifestValue, err := getResultValue(manifest)
	if err != nil {
		return err
	}
	for _, key := range strings.Split(manifestValue, ",") {
		if key == "" {
			continue
		}
		if _, found := results[key]; !found {
			return fmt.Errorf("no result found for %s but is part of the manifest %s", key, manifestValue)
		}
	}
	return nil
}
// verifyAnnotation verifies the signature annotation over the status-hash
// annotation using the controller's public key.
func verifyAnnotation(pub interface{}, annotations map[string]string) error {
	signature, hasSig := annotations[taskRunStatusHashSigAnnotation]
	if !hasSig {
		return fmt.Errorf("no signature found for %s", taskRunStatusHashSigAnnotation)
	}
	hash, hasHash := annotations[TaskRunStatusHashAnnotation]
	if !hasHash {
		return fmt.Errorf("no annotation status hash found for %s", TaskRunStatusHashAnnotation)
	}
	return verifySignature(pub, signature, hash)
}
// verifyResult verifies the stored signature over the named result's value.
func verifyResult(pub crypto.PublicKey, key string, results map[string]result.RunResult) error {
	sig, ok := results[key+KeySignatureSuffix]
	if !ok {
		return fmt.Errorf("no signature found for %s", key)
	}
	sigValue, err := getResultValue(sig)
	if err != nil {
		return err
	}
	resultValue, err := getResultValue(results[key])
	if err != nil {
		return err
	}
	return verifySignature(pub, sigValue, resultValue)
}
func verifySignature(pub crypto.PublicKey, signature string, value string) error {
b, err := base64.StdEncoding.DecodeString(signature)
if err != nil {
return fmt.Errorf("invalid signature: %w", err)
}
h := sha256.Sum256([]byte(value))
// Check val against sig
switch t := pub.(type) {
case *ecdsa.PublicKey:
if !ecdsa.VerifyASN1(t, h[:], b) {
return errors.New("invalid signature")
}
return nil
case *rsa.PublicKey:
return rsa.VerifyPKCS1v15(t, crypto.SHA256, h[:], b)
case ed25519.PublicKey:
if !ed25519.Verify(t, []byte(value), b) {
return errors.New("invalid signature")
}
return nil
default:
return fmt.Errorf("unsupported key type: %s", t)
}
}
// getResultValue renders a RunResult value as one string: string params
// verbatim, arrays joined with commas, and objects as comma-joined
// key,value pairs in sorted-key order. Unknown types are an error.
func getResultValue(result result.RunResult) (string, error) {
	aos := v1beta1.ArrayOrString{}
	if err := aos.UnmarshalJSON([]byte(result.Value)); err != nil {
		return "", fmt.Errorf("unmarshal error for key: %s", result.Key)
	}
	switch aos.Type {
	case v1beta1.ParamTypeString:
		return aos.StringVal, nil
	case v1beta1.ParamTypeArray:
		return strings.Join(aos.ArrayVal, ","), nil
	case v1beta1.ParamTypeObject:
		// Sort keys so the rendered value is deterministic.
		keys := make([]string, 0, len(aos.ObjectVal))
		for k := range aos.ObjectVal {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		pairs := make([]string, 0, 2*len(keys))
		for _, k := range keys {
			pairs = append(pairs, k, aos.ObjectVal[k])
		}
		return strings.Join(pairs, ","), nil
	}
	return "", fmt.Errorf("invalid result type for key: %s", result.Key)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"context"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetTaskRunStatusForPipelineTask takes a child reference and returns the actual TaskRunStatus
// for the PipelineTask. It returns an error if the child reference's kind isn't TaskRun.
// A NotFound error from the API server is tolerated and reported as a nil status.
func GetTaskRunStatusForPipelineTask(ctx context.Context, client versioned.Interface, ns string, childRef v1.ChildStatusReference) (*v1.TaskRunStatus, error) {
	if childRef.Kind != "TaskRun" {
		return nil, fmt.Errorf("could not fetch status for PipelineTask %s: should have kind TaskRun, but is %s", childRef.PipelineTaskName, childRef.Kind)
	}
	tr, err := client.TektonV1().TaskRuns(ns).Get(ctx, childRef.Name, metav1.GetOptions{})
	switch {
	case err != nil && !errors.IsNotFound(err):
		return nil, err
	case tr == nil:
		return nil, nil //nolint:nilnil // would be more ergonomic to return a sentinel error
	default:
		return &tr.Status, nil
	}
}
// GetCustomRunStatusForPipelineTask takes a child reference and returns the actual CustomRunStatus for the
// PipelineTask. It returns an error if the child reference's kind isn't CustomRun.
// A NotFound error from the API server is tolerated and reported as a nil status.
func GetCustomRunStatusForPipelineTask(ctx context.Context, client versioned.Interface, ns string, childRef v1.ChildStatusReference) (*v1beta1.CustomRunStatus, error) {
	if childRef.Kind != "CustomRun" {
		return nil, fmt.Errorf("could not fetch status for PipelineTask %s: should have kind CustomRun, but is %s", childRef.PipelineTaskName, childRef.Kind)
	}
	cr, err := client.TektonV1beta1().CustomRuns(ns).Get(ctx, childRef.Name, metav1.GetOptions{})
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}
	if cr == nil {
		return nil, nil //nolint:nilnil // would be more ergonomic to return a sentinel error
	}
	return &cr.Status, nil
}
// GetPipelineTaskStatuses returns populated TaskRun and Run status maps for a PipelineRun from its ChildReferences.
// If the PipelineRun has no ChildReferences, nothing will be populated.
// NotFound errors on individual children are tolerated: the map entry is
// still created, just with a nil Status.
func GetPipelineTaskStatuses(ctx context.Context, client versioned.Interface, ns string, pr *v1.PipelineRun) (map[string]*v1.PipelineRunTaskRunStatus,
	map[string]*v1.PipelineRunRunStatus, error) {
	// If the PipelineRun is nil, just return
	if pr == nil {
		return nil, nil, nil
	}
	// No child references recorded yet: nothing to fetch, return nil maps.
	if len(pr.Status.ChildReferences) == 0 {
		return nil, nil, nil
	}
	trStatuses := make(map[string]*v1.PipelineRunTaskRunStatus)
	runStatuses := make(map[string]*v1.PipelineRunRunStatus)
	for _, cr := range pr.Status.ChildReferences {
		switch cr.Kind {
		case "TaskRun":
			tr, err := client.TektonV1().TaskRuns(ns).Get(ctx, cr.Name, metav1.GetOptions{})
			// Only hard API errors abort; NotFound leaves tr nil.
			if err != nil && !errors.IsNotFound(err) {
				return nil, nil, err
			}
			trStatuses[cr.Name] = &v1.PipelineRunTaskRunStatus{
				PipelineTaskName: cr.PipelineTaskName,
				WhenExpressions:  cr.WhenExpressions,
			}
			if tr != nil {
				trStatuses[cr.Name].Status = &tr.Status
			}
		case "CustomRun":
			r, err := client.TektonV1beta1().CustomRuns(ns).Get(ctx, cr.Name, metav1.GetOptions{})
			if err != nil && !errors.IsNotFound(err) {
				return nil, nil, err
			}
			runStatuses[cr.Name] = &v1.PipelineRunRunStatus{
				PipelineTaskName: cr.PipelineTaskName,
				WhenExpressions:  cr.WhenExpressions,
			}
			if r != nil {
				runStatuses[cr.Name].Status = &r.Status
			}
		default:
			// Don't do anything for unknown types.
		}
	}
	return trStatuses, runStatuses, nil
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package substitution
import (
"fmt"
"strings"
)
// ApplyReplacements returns a string with references to parameters replaced,
// based on the mapping provided in replacements.
// For example, if the input string is "foo: $(params.foo)", and replacements maps "params.foo" to "bar",
// the output would be "foo: bar".
func ApplyReplacements(in string, replacements map[string]string) string {
	// Pre-size: each replacement contributes exactly one pattern/value pair,
	// avoiding repeated slice growth.
	replacementsList := make([]string, 0, 2*len(replacements))
	for k, v := range replacements {
		replacementsList = append(replacementsList, fmt.Sprintf("$(%s)", k), v)
	}
	// strings.Replacer does all replacements in one pass, preventing multiple replacements
	// See #2093 for an explanation on why we need to do this.
	replacer := strings.NewReplacer(replacementsList...)
	return replacer.Replace(in)
}
// ApplyArrayReplacements takes an input string, and output an array of strings related to possible arrayReplacements. If there aren't any
// areas where the input can be split up via arrayReplacements, then just return an array with a single element,
// which is ApplyReplacements(in, replacements).
func ApplyArrayReplacements(in string, stringReplacements map[string]string, arrayReplacements map[string][]string) []string {
for k, v := range arrayReplacements {
stringToReplace := fmt.Sprintf("$(%s)", k)
// If the input string matches a replacement's key (without padding characters), return the corresponding array.
// Note that the webhook should prevent all instances where this could evaluate to false.
if (strings.Count(in, stringToReplace) == 1) && len(in) == len(stringToReplace) {
return v
}
// same replace logic for star array expressions
starStringtoReplace := fmt.Sprintf("$(%s[*])", k)
if (strings.Count(in, starStringtoReplace) == 1) && len(in) == len(starStringtoReplace) {
return v
}
}
// Otherwise return a size-1 array containing the input string with standard stringReplacements applied.
return []string{ApplyReplacements(in, stringReplacements)}
}
//go:build !disable_tls
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package substitution
import (
"fmt"
"regexp"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
)
const (
	// parameterSubstitution matches a variable name, optionally followed by a
	// whole-array/object reference "[*]".
	parameterSubstitution = `.*?(\[\*\])?`
	// braceMatchingRegex is a regex for parameter references including dot notation, bracket notation with single and double quotes.
	braceMatchingRegex = "(\\$(\\(%s(\\.(?P<var1>%s)|\\[\"(?P<var2>%s)\"\\]|\\['(?P<var3>%s)'\\])\\)))"
	// arrayIndexing will match all `[int]` and `[*]` for parseExpression
	arrayIndexing = `\[([0-9])*\*?\]`
	// paramIndexing will match all `$(params.paramName[int])` expressions
	paramIndexing = `\$\(params(\.[_a-zA-Z0-9.-]+|\[\'[_a-zA-Z0-9.-\/]+\'\]|\[\"[_a-zA-Z0-9.-\/]+\"\])\[[0-9]+\]\)`
	// intIndex will match all `[int]` expressions
	intIndex = `\[[0-9]+\]`
)
// arrayIndexingRegex is used to match `[int]` and `[*]`
var arrayIndexingRegex = regexp.MustCompile(arrayIndexing)

// paramIndexingRegex will match all `$(params.paramName[int])` expressions
var paramIndexingRegex = regexp.MustCompile(paramIndexing)

// intIndexRegex will match all `[int]` for param expression
var intIndexRegex = regexp.MustCompile(intIndex)
// ValidateNoReferencesToUnknownVariables returns an error if the input string contains references to unknown variables
// Inputs:
// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)"
// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline"
// - vars: names of known variables
// Returns nil when value contains no references with the given prefix.
func ValidateNoReferencesToUnknownVariables(value, prefix string, vars sets.String) *apis.FieldError {
	return validateNoReferencesToUnknownVariables(value, prefix, vars, false)
}
// ValidateNoReferencesToUnknownVariablesWithDetail same as ValidateNoReferencesToUnknownVariables
// but with more prefix detailed error message: the offending variable name is
// included in the error text.
func ValidateNoReferencesToUnknownVariablesWithDetail(value, prefix string, vars sets.String) *apis.FieldError {
	return validateNoReferencesToUnknownVariables(value, prefix, vars, true)
}
// validateNoReferencesToUnknownVariables reports an error for the first
// extracted variable (array indexes stripped) that is not in vars. When
// withDetail is set, the error message names the offending variable.
func validateNoReferencesToUnknownVariables(value, prefix string, vars sets.String, withDetail bool) *apis.FieldError {
	vs, present, errString := ExtractVariablesFromString(value, prefix)
	if !present {
		return nil
	}
	if errString != "" {
		return &apis.FieldError{
			Message: errString,
			Paths:   []string{""},
		}
	}
	for _, v := range vs {
		trimmed := TrimArrayIndex(v)
		if vars.Has(trimmed) {
			continue
		}
		msg := fmt.Sprintf("non-existent variable in %q", value)
		if withDetail {
			msg = fmt.Sprintf("non-existent variable `%s` in %q", trimmed, value)
		}
		return &apis.FieldError{
			Message: msg,
			// Empty path is required to make the `ViaField`, … work
			Paths: []string{""},
		}
	}
	return nil
}
// ValidateNoReferencesToProhibitedVariables returns an error if the input string contains any references to any variables in vars,
// except for array indexing references.
//
// Inputs:
// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)"
// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline"
// - vars: names of known variables
func ValidateNoReferencesToProhibitedVariables(value, prefix string, vars sets.String) *apis.FieldError {
	vs, present, errString := ExtractVariablesFromString(value, prefix)
	if !present {
		return nil
	}
	if errString != "" {
		return &apis.FieldError{
			Message: errString,
			Paths:   []string{""},
		}
	}
	for _, v := range vs {
		// Whole-array references ("[*]") count as the variable itself.
		if vars.Has(strings.TrimSuffix(v, "[*]")) {
			return &apis.FieldError{
				Message: fmt.Sprintf("variable type invalid in %q", value),
				// Empty path is required to make the `ViaField`, … work
				Paths: []string{""},
			}
		}
	}
	return nil
}
// ValidateNoReferencesToEntireProhibitedVariables returns an error if the input string contains any whole array/object references
// to any variables in vars. References to array indexes or object keys are permitted.
//
// Inputs:
// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)"
// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline"
// - vars: names of known variables
func ValidateNoReferencesToEntireProhibitedVariables(value, prefix string, vars sets.String) *apis.FieldError {
	paths := []string{""} // Empty path is required to make the `ViaField`, … work
	vs, err := extractEntireVariablesFromString(value, prefix)
	if err != nil {
		return &apis.FieldError{
			Message: fmt.Sprintf("extractEntireVariablesFromString failed : %v", err),
			Paths:   paths,
		}
	}
	for _, v := range vs {
		// "$(v[*])" is still a whole-variable reference, so strip the suffix
		// before the membership check.
		v = strings.TrimSuffix(v, "[*]")
		if vars.Has(v) {
			return &apis.FieldError{
				Message: fmt.Sprintf("variable type invalid in %q", value),
				Paths:   paths,
			}
		}
	}
	return nil
}
// ValidateVariableReferenceIsIsolated returns an error if the input string contains characters in addition to references to known parameters.
// For example, if "foo" is a known parameter, a value of "foo: $(params.foo)" returns an error, but a value of "$(params.foo)" does not.
// Inputs:
// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)"
// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline"
// - vars: names of known variables
func ValidateVariableReferenceIsIsolated(value, prefix string, vars sets.String) *apis.FieldError {
	paths := []string{""} // Empty path is required to make the `ViaField`, … work
	if vs, present, errString := ExtractVariablesFromString(value, prefix); present {
		if errString != "" {
			return &apis.FieldError{
				Message: errString,
				Paths:   paths,
			}
		}
		// firstMatch is the first full "$(...)" expression found in value.
		firstMatch, err := extractExpressionFromString(value, prefix)
		if err != nil {
			return &apis.FieldError{
				Message: err.Error(),
				Paths:   paths,
			}
		}
		for _, v := range vs {
			v = strings.TrimSuffix(v, "[*]")
			if vars.Has(v) {
				// The reference is "isolated" only when the first matched
				// expression spans the entire input; any extra characters mean
				// the value mixes literals with the reference.
				if len(value) != len(firstMatch) {
					return &apis.FieldError{
						Message: fmt.Sprintf("variable is not properly isolated in %q", value),
						Paths:   paths,
					}
				}
			}
		}
	}
	return nil
}
// ValidateWholeArrayOrObjectRefInStringVariable validates if a single string field uses references to the whole array/object appropriately
// valid example: "$(params.myObject[*])"
// invalid example: "$(params.name-not-exist[*])"
// Returns (true, nil-or-error) when value is exactly one whole-array/object
// reference; (false, nil) when it is not such a reference at all.
func ValidateWholeArrayOrObjectRefInStringVariable(name, value, prefix string, vars sets.String) (isIsolated bool, errs *apis.FieldError) {
	// nameSubstitution requires the trailing "[*]", i.e. a whole array/object reference.
	nameSubstitution := `[_a-zA-Z0-9.-]+\[\*\]`
	// a regex to check if the stringValue is an isolated reference to the whole array/object param without extra string literal.
	isolatedVariablePattern := fmt.Sprintf(fmt.Sprintf("^%s$", braceMatchingRegex), prefix, nameSubstitution, nameSubstitution, nameSubstitution)
	isolatedVariableRegex, err := regexp.Compile(isolatedVariablePattern)
	if err != nil {
		return false, &apis.FieldError{
			Message: fmt.Sprint("Fail to parse the regex: ", err),
			Paths:   []string{fmt.Sprintf("%s.%s", prefix, name)},
		}
	}
	if isolatedVariableRegex.MatchString(value) {
		// The reference is isolated; now check that it names a known variable.
		return true, ValidateNoReferencesToUnknownVariables(value, prefix, vars).ViaFieldKey(prefix, name)
	}
	return false, nil
}
// extractExpressionFromString returns the first full string expression found
// in s (e.g. "$(input.params.foo)"), or "" if s contains none.
func extractExpressionFromString(s, prefix string) (string, error) {
	re, err := regexp.Compile(fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution))
	if err != nil {
		return "", err
	}
	if match := re.FindStringSubmatch(s); match != nil {
		return match[0], nil
	}
	return "", nil
}
// ExtractVariablesFromString extracts variables from an input string s with the given prefix via regex matching.
// It returns a slice of strings which contains the extracted variables, a bool flag to indicate if matches were found
// and the error string if the referencing of parameters is invalid.
// If the string does not contain the input prefix then the output will contain a slice of strings with length 0.
func ExtractVariablesFromString(s, prefix string) ([]string, bool, string) {
	pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution)
	re, err := regexp.Compile(pattern)
	if err != nil {
		// NOTE(review): a regex compile failure is silently reported as
		// "no matches" — confirm callers never need to distinguish this.
		return nil, false, ""
	}
	matches := re.FindAllStringSubmatch(s, -1)
	errString := ""
	// Input string does not contain the prefix and therefore not matches are found.
	if len(matches) == 0 {
		return []string{}, false, ""
	}
	vars := make([]string, len(matches))
	for i, match := range matches {
		groups := matchGroups(match, re)
		// var1 is the dot-notation capture; var2/var3 are the double- and
		// single-quoted bracket notations. The first non-empty one wins.
		for j, v := range []string{"var1", "var2", "var3"} {
			val := groups[v]
			// If using the dot notation, the number of dot-separated components is restricted up to 2.
			// Valid Examples:
			//  - extract "aString" from <prefix>.aString
			//  - extract "anObject" from <prefix>.anObject.key
			// Invalid Examples:
			//  - <prefix>.foo.bar.baz....
			if j == 0 && strings.Contains(val, ".") {
				if len(strings.Split(val, ".")) > 2 {
					errString = fmt.Sprintf(`Invalid referencing of parameters in "%s"! Only two dot-separated components after the prefix "%s" are allowed.`, s, prefix)
					return vars, true, errString
				}
				// Keep only the variable name; drop the object key after the dot.
				vars[i] = strings.SplitN(val, ".", 2)[0]
				break
			}
			if val != "" {
				vars[i] = val
				break
			}
		}
	}
	return vars, true, errString
}
// extractEntireVariablesFromString returns any references to entire array or
// object params in s with the given prefix, e.g. "foo.bar.baz" stays intact.
func extractEntireVariablesFromString(s, prefix string) ([]string, error) {
	pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution)
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, fmt.Errorf("failed to parse regex pattern: %w", err)
	}
	matches := re.FindAllStringSubmatch(s, -1)
	if len(matches) == 0 {
		return []string{}, nil
	}
	extracted := make([]string, len(matches))
	for i, match := range matches {
		groups := matchGroups(match, re)
		// Unlike ExtractVariablesFromString, dot-separated components are
		// preserved whole: foo -> foo, foo.bar -> foo.bar, foo.bar.baz -> foo.bar.baz.
		// var1 is dot notation; var2/var3 are the quoted bracket notations.
		for _, name := range []string{"var1", "var2", "var3"} {
			if val := groups[name]; val != "" {
				extracted[i] = val
				break
			}
		}
	}
	return extracted, nil
}
func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string {
groups := make(map[string]string)
for i, name := range pattern.SubexpNames()[1:] {
groups[name] = matches[i+1]
}
return groups
}
// TrimArrayIndex replaces all `[i]` and `[*]` to "".
// e.g. "params.foo[0]" -> "params.foo", "params.foo[*]" -> "params.foo".
func TrimArrayIndex(s string) string {
	return arrayIndexingRegex.ReplaceAllString(s, "")
}
// ExtractArrayIndexingParamsExpressions will find all `$(params.paramName[int])` expressions
// in s, in order of appearance; returns nil when there are none.
func ExtractArrayIndexingParamsExpressions(s string) []string {
	return paramIndexingRegex.FindAllString(s, -1)
}
// ExtractVariableExpressions returns every full "$(<prefix>...)" expression
// found in s. An empty (non-nil) slice is returned when there are none.
func ExtractVariableExpressions(s, prefix string) ([]string, error) {
	re, err := regexp.Compile(fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution))
	if err != nil {
		return nil, fmt.Errorf("failed to parse regex pattern: %w", err)
	}
	exprs := re.FindAllString(s, -1)
	if len(exprs) == 0 {
		return []string{}, nil
	}
	return exprs, nil
}
// ExtractIndexString will find the leftmost match of `[int]`
// in s, or "" when there is none.
func ExtractIndexString(s string) string {
	return intIndexRegex.FindString(s)
}
// ExtractIndex parses the integer out of an `[int]` expression such as "[2]".
func ExtractIndex(s string) (int, error) {
	inner := strings.TrimPrefix(s, "[")
	inner = strings.TrimSuffix(inner, "]")
	return strconv.Atoi(inner)
}
// StripStarVarSubExpression strips "$(target[*])" down to "target".
// Each wrapper is optional: plain "target" is returned unchanged.
func StripStarVarSubExpression(s string) string {
	out := strings.TrimPrefix(s, "$(")
	out = strings.TrimSuffix(out, ")")
	return strings.TrimSuffix(out, "[*]")
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
_ "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/fake" // Make sure the fake taskrun informer is setup
"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
"k8s.io/client-go/rest"
"knative.dev/pkg/injection"
)
// init registers the fake metrics recorder client and informer with knative's
// fake injection framework, mirroring the real registrations in taskrunmetrics.
func init() {
	injection.Fake.RegisterClient(func(ctx context.Context, _ *rest.Config) context.Context { return taskrunmetrics.WithClient(ctx) })
	injection.Fake.RegisterInformer(taskrunmetrics.WithInformer)
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskrunmetrics
import (
"context"
taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun"
listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
"k8s.io/client-go/rest"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
// init registers the metrics recorder "client" and the recorder informer with
// knative's default injection framework so they are set up with the controller.
func init() {
	injection.Default.RegisterClient(func(ctx context.Context, _ *rest.Config) context.Context { return WithClient(ctx) })
	injection.Default.RegisterInformer(WithInformer)
}
// RecorderKey is used for associating the Recorder inside the context.Context.
// An empty struct is used so the key itself carries no data.
type RecorderKey struct{}
// WithClient adds a metrics recorder to the given context
// under RecorderKey. If recorder creation fails the error is only logged and
// the (possibly degraded) recorder returned by NewRecorder is stored anyway.
func WithClient(ctx context.Context) context.Context {
	rec, err := NewRecorder(ctx)
	if err != nil {
		logging.FromContext(ctx).Errorf("Failed to create taskrun metrics recorder %v", err)
	}
	return context.WithValue(ctx, RecorderKey{}, rec)
}
// Get extracts the taskrunmetrics.Recorder from the context.
// It panics (via the logger) when WithClient was never called on this context.
func Get(ctx context.Context) *Recorder {
	untyped := ctx.Value(RecorderKey{})
	if untyped == nil {
		logging.FromContext(ctx).Panic("Unable to fetch *taskrunmetrics.Recorder from context.")
	}
	return untyped.(*Recorder)
}
// InformerKey is used for associating the Informer inside the context.Context.
// An empty struct is used so the key itself carries no data.
type InformerKey struct{}
// WithInformer returns the given context, and a configured informer
// that periodically reports running-TaskRun metrics from the TaskRun lister.
// It requires Get(ctx) to succeed, i.e. WithClient must have run first.
func WithInformer(ctx context.Context) (context.Context, controller.Informer) {
	return ctx, &recorderInformer{
		ctx:     ctx,
		metrics: Get(ctx),
		lister:  taskruninformer.Get(ctx).Lister(),
	}
}
// recorderInformer adapts the metrics Recorder to the controller.Informer
// interface so the metrics reporting loop is started/stopped with the
// controller's other informers.
type recorderInformer struct {
	ctx     context.Context
	metrics *Recorder
	lister  listers.TaskRunLister
}

// Compile-time assertion that recorderInformer implements controller.Informer.
var _ controller.Informer = (*recorderInformer)(nil)
// Run starts the recorder informer in a goroutine
func (ri *recorderInformer) Run(stopCh <-chan struct{}) {
	// Turn the stopCh into a context for reporting metrics.
	ctx, cancel := context.WithCancel(ri.ctx)
	go func() {
		// Propagate informer shutdown to the reporting context.
		<-stopCh
		cancel()
	}()
	// ReportRunningTaskRuns is defined elsewhere; presumably it loops until
	// ctx is cancelled — confirm it honors cancellation.
	go ri.metrics.ReportRunningTaskRuns(ctx, ri.lister)
}
// HasSynced returns whether the informer has synced, which in this case will always be true:
// there is no cache to warm, so the recorder is "synced" immediately.
func (ri *recorderInformer) HasSynced() bool {
	return true
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskrunmetrics
import (
"context"
"encoding/hex"
"fmt"
"sync"
"time"
"github.com/pkg/errors"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/pod"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.uber.org/zap"
"golang.org/x/crypto/blake2b"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"knative.dev/pkg/apis"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
)
// anonymous is the fallback tag value used when no task/pipeline name is known.
const anonymous = "anonymous"

var (
	// Tag keys attached to the measures below; which ones are actually used
	// depends on the configured taskrun/pipelinerun metrics level.
	pipelinerunTag = tag.MustNewKey("pipelinerun")
	pipelineTag    = tag.MustNewKey("pipeline")
	taskrunTag     = tag.MustNewKey("taskrun")
	taskTag        = tag.MustNewKey("task")
	namespaceTag   = tag.MustNewKey("namespace")
	statusTag      = tag.MustNewKey("status")
	reasonTag      = tag.MustNewKey("reason")
	podTag         = tag.MustNewKey("pod")

	// Views are created and (re-)registered by viewRegister based on the
	// current metrics config; they are nil until then.
	trDurationView                             *view.View
	prTRDurationView                           *view.View
	trTotalView                                *view.View
	runningTRsView                             *view.View
	runningTRsThrottledByQuotaView             *view.View
	runningTRsThrottledByNodeView              *view.View
	runningTRsWaitingOnTaskResolutionCountView *view.View
	podLatencyView                             *view.View

	trDuration = stats.Float64(
		"taskrun_duration_seconds",
		"The taskrun's execution time in seconds",
		stats.UnitDimensionless)

	prTRDuration = stats.Float64(
		"pipelinerun_taskrun_duration_seconds",
		"The pipelinerun's taskrun execution time in seconds",
		stats.UnitDimensionless)

	trTotal = stats.Float64("taskrun_total",
		"Number of taskruns",
		stats.UnitDimensionless)

	runningTRs = stats.Float64("running_taskruns",
		"Number of taskruns executing currently",
		stats.UnitDimensionless)

	runningTRsWaitingOnTaskResolutionCount = stats.Float64("running_taskruns_waiting_on_task_resolution_count",
		"Number of taskruns executing currently that are waiting on resolution requests for their task references.",
		stats.UnitDimensionless)

	runningTRsThrottledByQuota = stats.Float64("running_taskruns_throttled_by_quota",
		"Number of taskruns executing currently, but whose underlying Pods or Containers are suspended by k8s because of defined ResourceQuotas. Such suspensions can occur as part of initial scheduling of the Pod, or scheduling of any of the subsequent Container(s) in the Pod after the first Container is started",
		stats.UnitDimensionless)

	runningTRsThrottledByNode = stats.Float64("running_taskruns_throttled_by_node",
		"Number of taskruns executing currently, but whose underlying Pods or Containers are suspended by k8s because of Node level constraints. Such suspensions can occur as part of initial scheduling of the Pod, or scheduling of any of the subsequent Container(s) in the Pod after the first Container is started",
		stats.UnitDimensionless)

	podLatency = stats.Float64("taskruns_pod_latency_milliseconds",
		"scheduling latency for the taskruns pods",
		stats.UnitMilliseconds)
)
// Recorder is used to actually record TaskRun metrics
type Recorder struct {
	// mutex guards the fields below; taken by viewRegister and updateConfig.
	mutex       sync.Mutex
	initialized bool // false when view registration failed in NewRecorder
	cfg         *config.Metrics

	// ReportingPeriod is the reporting interval (defaults to 30s in NewRecorder).
	ReportingPeriod time.Duration

	// insertTaskTag/insertPipelineTag are chosen by viewRegister according to
	// the configured taskrun/pipelinerun metrics levels.
	insertTaskTag func(task,
		taskrun string) []tag.Mutator

	insertPipelineTag func(pipeline,
		pipelinerun string) []tag.Mutator

	// hash of the last applied metrics config; used by updateConfig to skip no-ops.
	hash string
}
// We cannot register the view multiple times, so NewRecorder lazily
// initializes this singleton and returns the same recorder across any
// subsequent invocations.
var (
	once          sync.Once
	r             *Recorder
	errRegistering error // sticky: every NewRecorder call after a failure returns it
)
// NewRecorder creates a new metrics recorder instance
// to log the TaskRun related metrics
// The singleton is built exactly once; if view registration fails the first
// time, the recorder is marked uninitialized and the same error is returned
// on every subsequent call (sync.Once never re-runs).
func NewRecorder(ctx context.Context) (*Recorder, error) {
	once.Do(func() {
		cfg := config.FromContextOrDefaults(ctx)
		r = &Recorder{
			initialized: true,
			cfg:         cfg.Metrics,

			// Default to reporting metrics every 30s.
			ReportingPeriod: 30 * time.Second,
		}

		errRegistering = viewRegister(cfg.Metrics)
		if errRegistering != nil {
			r.initialized = false
			return
		}
	})

	return r, errRegistering
}
// viewRegister builds and registers all OpenCensus views for TaskRun metrics
// according to cfg: the configured pipelinerun/taskrun levels decide which tag
// keys are attached and which insert-tag helpers the singleton recorder uses.
// It mutates the package-level view variables and fields of the singleton r.
func viewRegister(cfg *config.Metrics) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	// Tags for pipelinerun-scoped views, by configured granularity.
	var prunTag []tag.Key
	switch cfg.PipelinerunLevel {
	case config.PipelinerunLevelAtPipelinerun:
		prunTag = []tag.Key{pipelineTag, pipelinerunTag}
		r.insertPipelineTag = pipelinerunInsertTag
	case config.PipelinerunLevelAtPipeline:
		prunTag = []tag.Key{pipelineTag}
		r.insertPipelineTag = pipelineInsertTag
	case config.PipelinerunLevelAtNS:
		prunTag = []tag.Key{}
		r.insertPipelineTag = nilInsertTag
	default:
		return errors.New("invalid config for PipelinerunLevel: " + cfg.PipelinerunLevel)
	}

	// Tags for taskrun-scoped views, by configured granularity.
	var trunTag []tag.Key
	switch cfg.TaskrunLevel {
	case config.TaskrunLevelAtTaskrun:
		trunTag = []tag.Key{taskTag, taskrunTag}
		r.insertTaskTag = taskrunInsertTag
	case config.TaskrunLevelAtTask:
		trunTag = []tag.Key{taskTag}
		r.insertTaskTag = taskInsertTag
	// NOTE(review): this TaskrunLevel switch matches against the
	// pipelinerun-level namespace constant; presumably both constants share
	// the same string value ("namespace") so behavior is unaffected, but
	// config.TaskrunLevelAtNS would be clearer — confirm and clean up.
	case config.PipelinerunLevelAtNS:
		trunTag = []tag.Key{}
		r.insertTaskTag = nilInsertTag
	default:
		return errors.New("invalid config for TaskrunLevel: " + cfg.TaskrunLevel)
	}

	// Duration aggregation: per-run levels force LastValue (a histogram per
	// individual run would be meaningless); otherwise the config chooses.
	distribution := view.Distribution(10, 30, 60, 300, 900, 1800, 3600, 5400, 10800, 21600, 43200, 86400)

	if cfg.TaskrunLevel == config.TaskrunLevelAtTaskrun ||
		cfg.PipelinerunLevel == config.PipelinerunLevelAtPipelinerun {
		distribution = view.LastValue()
	} else {
		switch cfg.DurationTaskrunType {
		case config.DurationTaskrunTypeHistogram:
		case config.DurationTaskrunTypeLastValue:
			distribution = view.LastValue()
		default:
			return errors.New("invalid config for DurationTaskrunType: " + cfg.DurationTaskrunType)
		}
	}

	if cfg.CountWithReason {
		trunTag = append(trunTag, reasonTag)
	}

	trDurationView = &view.View{
		Description: trDuration.Description(),
		Measure:     trDuration,
		Aggregation: distribution,
		TagKeys:     append([]tag.Key{statusTag, namespaceTag}, trunTag...),
	}

	prTRDurationView = &view.View{
		Description: prTRDuration.Description(),
		Measure:     prTRDuration,
		Aggregation: distribution,
		TagKeys:     append([]tag.Key{statusTag, namespaceTag}, append(trunTag, prunTag...)...),
	}

	trTotalView = &view.View{
		Description: trTotal.Description(),
		Measure:     trTotal,
		Aggregation: view.Count(),
		TagKeys:     []tag.Key{statusTag},
	}

	runningTRsView = &view.View{
		Description: runningTRs.Description(),
		Measure:     runningTRs,
		Aggregation: view.LastValue(),
	}

	runningTRsWaitingOnTaskResolutionCountView = &view.View{
		Description: runningTRsWaitingOnTaskResolutionCount.Description(),
		Measure:     runningTRsWaitingOnTaskResolutionCount,
		Aggregation: view.LastValue(),
	}

	// Throttle gauges optionally carry a namespace tag.
	throttleViewTags := []tag.Key{}
	if cfg.ThrottleWithNamespace {
		throttleViewTags = append(throttleViewTags, namespaceTag)
	}
	runningTRsThrottledByQuotaView = &view.View{
		Description: runningTRsThrottledByQuota.Description(),
		Measure:     runningTRsThrottledByQuota,
		Aggregation: view.LastValue(),
		TagKeys:     throttleViewTags,
	}

	runningTRsThrottledByNodeView = &view.View{
		Description: runningTRsThrottledByNode.Description(),
		Measure:     runningTRsThrottledByNode,
		Aggregation: view.LastValue(),
		TagKeys:     throttleViewTags,
	}

	podLatencyView = &view.View{
		Description: podLatency.Description(),
		Measure:     podLatency,
		Aggregation: view.LastValue(),
		TagKeys:     append([]tag.Key{namespaceTag, podTag}, trunTag...),
	}

	return view.Register(
		trDurationView,
		prTRDurationView,
		trTotalView,
		runningTRsView,
		runningTRsWaitingOnTaskResolutionCountView,
		runningTRsThrottledByQuotaView,
		runningTRsThrottledByNodeView,
		podLatencyView,
	)
}
// viewUnregister removes all views registered by viewRegister; called before
// re-registering when the metrics configuration changes.
func viewUnregister() {
	view.Unregister(
		trDurationView,
		prTRDurationView,
		trTotalView,
		runningTRsView,
		runningTRsWaitingOnTaskResolutionCountView,
		runningTRsThrottledByQuotaView,
		runningTRsThrottledByNodeView,
		podLatencyView,
	)
}
// OnStore returns a function that checks if metrics are configured for a config.Store, and registers it if so
// The callback re-registers all views when the metrics ConfigMap actually changed.
func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string, value interface{}) {
	return func(name string, value interface{}) {
		if name != config.GetMetricsConfigName() {
			return
		}
		cfg, ok := value.(*config.Metrics)
		if !ok {
			logger.Error("Failed to do type insertion for extracting metrics config")
			return
		}
		if !r.updateConfig(cfg) {
			// Config unchanged; leave the registered views alone.
			return
		}
		// Update metrics according to the configuration
		viewUnregister()
		if err := viewRegister(cfg); err != nil {
			logger.Errorf("Failed to register View %v ", err)
		}
	}
}
// pipelinerunInsertTag returns mutators that record both the pipeline and
// pipelinerun names as metric tags.
func pipelinerunInsertTag(pipeline, pipelinerun string) []tag.Mutator {
	mutators := []tag.Mutator{tag.Insert(pipelineTag, pipeline)}
	return append(mutators, tag.Insert(pipelinerunTag, pipelinerun))
}
// pipelineInsertTag records only the pipeline name; the pipelinerun argument
// exists to satisfy the shared insert-tag function signature.
func pipelineInsertTag(pipeline, pipelinerun string) []tag.Mutator {
	return []tag.Mutator{
		tag.Insert(pipelineTag, pipeline),
	}
}
// taskrunInsertTag returns mutators that record both the task and taskrun
// names as metric tags.
func taskrunInsertTag(task, taskrun string) []tag.Mutator {
	mutators := []tag.Mutator{tag.Insert(taskTag, task)}
	return append(mutators, tag.Insert(taskrunTag, taskrun))
}
// taskInsertTag records only the task name; the taskrun argument exists to
// satisfy the shared insert-tag function signature.
func taskInsertTag(task, taskrun string) []tag.Mutator {
	return []tag.Mutator{
		tag.Insert(taskTag, task),
	}
}
// nilInsertTag matches the insert-tag signature but records no tags at all.
func nilInsertTag(task, taskrun string) []tag.Mutator {
	mutators := make([]tag.Mutator, 0)
	return mutators
}
// getTaskTagName resolves the task name to use as a metric tag for the given
// TaskRun, falling back to "anonymous" when nothing suitable is found.
func getTaskTagName(tr *v1.TaskRun) string {
	switch {
	case tr.Spec.TaskRef != nil && len(tr.Spec.TaskRef.Name) > 0:
		// Explicit task reference wins.
		return tr.Spec.TaskRef.Name
	case tr.Spec.TaskSpec != nil:
		// Embedded task specs carry no name; use the pipeline-task label if present.
		if name, ok := tr.Labels[pipeline.PipelineTaskLabelKey]; ok && len(name) > 0 {
			return name
		}
	default:
		if len(tr.Labels) > 0 {
			if name, ok := tr.Labels[pipeline.TaskLabelKey]; ok && len(name) > 0 {
				return name
			}
		}
	}
	return anonymous
}
// updateConfig stores cfg on the Recorder when it differs from the currently
// held configuration; it reports whether an update actually took place.
func (r *Recorder) updateConfig(cfg *config.Metrics) bool {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	// Fingerprint the config so equality can be checked cheaply.
	hash := ""
	if cfg != nil {
		sum := blake2b.Sum256([]byte(fmt.Sprintf("%v", *cfg)))
		hash = hex.EncodeToString(sum[:])
	}
	if hash == r.hash {
		// Identical configuration; nothing to do.
		return false
	}
	r.cfg = cfg
	r.hash = hash
	return true
}
// DurationAndCount logs the duration of TaskRun execution and
// count for number of TaskRuns succeed or failed
// returns an error if its failed to log the metrics
func (r *Recorder) DurationAndCount(ctx context.Context, tr *v1.TaskRun, beforeCondition *apis.Condition) error {
	if !r.initialized {
		return fmt.Errorf("ignoring the metrics recording for %s , failed to initialize the metrics recorder", tr.Name)
	}
	// Record only when the Succeeded condition actually changed; otherwise the
	// same transition would be counted on every reconcile.
	afterCondition := tr.Status.GetCondition(apis.ConditionSucceeded)
	if equality.Semantic.DeepEqual(beforeCondition, afterCondition) {
		return nil
	}
	r.mutex.Lock()
	defer r.mutex.Unlock()
	// NOTE(review): assumes tr.Status.StartTime is non-nil here — TODO confirm
	// callers only invoke this after the TaskRun has started.
	duration := time.Since(tr.Status.StartTime.Time)
	if tr.Status.CompletionTime != nil {
		// Prefer the exact completion window over "elapsed until now".
		duration = tr.Status.CompletionTime.Sub(tr.Status.StartTime.Time)
	}
	taskName := getTaskTagName(tr)
	// NOTE(review): cond is dereferenced below without a nil check; presumably
	// the Succeeded condition cannot disappear once it differed from
	// beforeCondition — verify.
	cond := tr.Status.GetCondition(apis.ConditionSucceeded)
	status := "success"
	if cond.Status == corev1.ConditionFalse {
		status = "failed"
	}
	reason := cond.Reason
	durationStat := trDuration
	tags := []tag.Mutator{tag.Insert(namespaceTag, tr.Namespace), tag.Insert(statusTag, status), tag.Insert(reasonTag, reason)}
	// TaskRuns owned by a PipelineRun are recorded under the pipeline-scoped
	// duration measure and carry pipeline/pipelinerun tags as configured.
	if ok, pipeline, pipelinerun := IsPartOfPipeline(tr); ok {
		durationStat = prTRDuration
		tags = append(tags, r.insertPipelineTag(pipeline, pipelinerun)...)
	}
	tags = append(tags, r.insertTaskTag(taskName, tr.Name)...)
	ctx, err := tag.New(ctx, tags...)
	if err != nil {
		return err
	}
	metrics.Record(ctx, durationStat.M(duration.Seconds()))
	metrics.Record(ctx, trTotal.M(1))
	return nil
}
// RunningTaskRuns logs the number of TaskRuns running right now
// returns an error if its failed to log the metrics
func (r *Recorder) RunningTaskRuns(ctx context.Context, lister listers.TaskRunLister) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if !r.initialized {
		return errors.New("ignoring the metrics recording, failed to initialize the metrics recorder")
	}
	trs, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	tagThrottleWithNamespace := r.cfg != nil && r.cfg.ThrottleWithNamespace
	running := 0
	waitingOnTaskResolution := 0
	throttledByQuota := map[string]int{}
	throttledByNode := map[string]int{}
	for _, tr := range trs {
		ns := tr.Namespace
		// Ensure every namespace appears in the throttle maps so its gauges
		// are emitted (as zero) even when nothing is currently throttled.
		if _, seen := throttledByQuota[ns]; !seen {
			throttledByQuota[ns] = 0
		}
		if _, seen := throttledByNode[ns]; !seen {
			throttledByNode[ns] = 0
		}
		if tr.IsDone() {
			continue
		}
		running++
		cond := tr.Status.GetCondition(apis.ConditionSucceeded)
		if cond == nil || cond.Status != corev1.ConditionUnknown {
			continue
		}
		switch cond.Reason {
		case pod.ReasonExceededResourceQuota:
			throttledByQuota[ns]++
		case pod.ReasonExceededNodeResources:
			throttledByNode[ns]++
		case v1.TaskRunReasonResolvingTaskRef:
			waitingOnTaskResolution++
		}
	}
	ctx, err = tag.New(ctx)
	if err != nil {
		return err
	}
	metrics.Record(ctx, runningTRs.M(float64(running)))
	metrics.Record(ctx, runningTRsWaitingOnTaskResolutionCount.M(float64(waitingOnTaskResolution)))
	for ns, cnt := range throttledByQuota {
		var mutators []tag.Mutator
		if tagThrottleWithNamespace {
			mutators = []tag.Mutator{tag.Insert(namespaceTag, ns)}
		}
		nsCtx, err := tag.New(ctx, mutators...)
		if err != nil {
			return err
		}
		metrics.Record(nsCtx, runningTRsThrottledByQuota.M(float64(cnt)))
	}
	for ns, cnt := range throttledByNode {
		var mutators []tag.Mutator
		if tagThrottleWithNamespace {
			mutators = []tag.Mutator{tag.Insert(namespaceTag, ns)}
		}
		nsCtx, err := tag.New(ctx, mutators...)
		if err != nil {
			return err
		}
		metrics.Record(nsCtx, runningTRsThrottledByNode.M(float64(cnt)))
	}
	return nil
}
// ReportRunningTaskRuns invokes RunningTaskRuns on our configured PeriodSeconds
// until the context is cancelled.
func (r *Recorder) ReportRunningTaskRuns(ctx context.Context, lister listers.TaskRunLister) {
	logger := logging.FromContext(ctx)
	// Reuse a single timer across iterations instead of allocating a new one
	// per loop. Reset is safe here because the timer has always already fired
	// (we only reach Reset via <-delay.C).
	delay := time.NewTimer(r.ReportingPeriod)
	defer delay.Stop()
	for {
		select {
		case <-ctx.Done():
			// When the context is cancelled, stop reporting.
			return
		case <-delay.C:
			// Every reporting period, surface a metric for the number of running tasks,
			// those running tasks currently throttled by k8s, and those running tasks
			// waiting on task reference resolution.
			if err := r.RunningTaskRuns(ctx, lister); err != nil {
				logger.Warnf("Failed to log the metrics : %v", err)
			}
			delay.Reset(r.ReportingPeriod)
		}
	}
}
// RecordPodLatency logs the duration required to schedule the pod for TaskRun
// returns an error if its failed to log the metrics
func (r *Recorder) RecordPodLatency(ctx context.Context, pod *corev1.Pod, tr *v1.TaskRun) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if !r.initialized {
		return errors.New("ignoring the metrics recording for pod , failed to initialize the metrics recorder")
	}
	scheduledTime := getScheduledTime(pod)
	if scheduledTime.IsZero() {
		return errors.New("pod has never got scheduled")
	}
	// Latency is the time between pod creation and its PodScheduled transition.
	latency := scheduledTime.Sub(pod.CreationTimestamp.Time)
	mutators := []tag.Mutator{
		tag.Insert(namespaceTag, tr.Namespace),
		tag.Insert(podTag, pod.Name),
	}
	mutators = append(mutators, r.insertTaskTag(getTaskTagName(tr), tr.Name)...)
	ctx, err := tag.New(ctx, mutators...)
	if err != nil {
		return err
	}
	metrics.Record(ctx, podLatency.M(float64(latency.Milliseconds())))
	return nil
}
// IsPartOfPipeline return true if TaskRun is a part of a Pipeline.
// It also return the name of Pipeline and PipelineRun
func IsPartOfPipeline(tr *v1.TaskRun) (bool, string, string) {
	pipelineName, okPipeline := tr.Labels[pipeline.PipelineLabelKey]
	pipelineRunName, okPipelineRun := tr.Labels[pipeline.PipelineRunLabelKey]
	// Both labels must be present for the TaskRun to count as pipeline-owned.
	if !okPipeline || !okPipelineRun {
		return false, "", ""
	}
	return true, pipelineName, pipelineRunName
}
// getScheduledTime returns the time at which the pod's PodScheduled condition
// last transitioned, or the zero time when no such condition exists.
func getScheduledTime(pod *corev1.Pod) metav1.Time {
	var scheduled metav1.Time
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodScheduled {
			scheduled = cond.LastTransitionTime
			break
		}
	}
	return scheduled
}
//go:build !disable_tls
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package termination
import (
"encoding/json"
"fmt"
"sort"
"github.com/tektoncd/pipeline/pkg/result"
"go.uber.org/zap"
)
// ParseMessage parses a termination message as results.
//
// If more than one item has the same key, only the latest is returned. Items
// are sorted by their key.
func ParseMessage(logger *zap.SugaredLogger, msg string) ([]result.RunResult, error) {
	if msg == "" {
		return nil, nil
	}
	var parsed []result.RunResult
	if err := json.Unmarshal([]byte(msg), &parsed); err != nil {
		return nil, fmt.Errorf("parsing message json: %w, msg: %s", err, msg)
	}
	// Deduplicate by key (last occurrence wins); zero-valued entries carry no
	// usable result, so they are dropped and logged.
	byKey := map[string]result.RunResult{}
	for _, rr := range parsed {
		if rr == (result.RunResult{}) {
			logger.Errorf("termination message contains non taskrun or pipelineresource result keys")
			continue
		}
		byKey[rr.Key] = rr
	}
	results := make([]result.RunResult, 0, len(byKey))
	for _, rr := range byKey {
		results = append(results, rr)
	}
	// Map iteration order is random; sort for a deterministic result.
	sort.Slice(results, func(i, j int) bool { return results[i].Key < results[j].Key })
	return results, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package termination
import (
"encoding/json"
"os"
"github.com/tektoncd/pipeline/pkg/result"
)
const (
	// MaxContainerTerminationMessageLength is the upper bound any one container may write to
	// its termination message path. Contents above this length will cause a failure.
	// 4096 bytes matches the limit described by errTooLong below.
	MaxContainerTerminationMessageLength = 1024 * 4
)
// WriteMessage writes the results to the termination message path.
//
// If a file already exists at path and holds a JSON list of results, the new
// results are appended to the existing ones before writing. Returns errTooLong
// when the encoded output exceeds MaxContainerTerminationMessageLength.
func WriteMessage(path string, pro []result.RunResult) error {
	// if the file at path exists, concatenate the new values otherwise create it
	// file at path already exists
	fileContents, err := os.ReadFile(path)
	if err == nil {
		var existingEntries []result.RunResult
		if err := json.Unmarshal(fileContents, &existingEntries); err == nil {
			// append new entries to existing entries
			pro = append(existingEntries, pro...)
		}
	} else if !os.IsNotExist(err) {
		return err
	}
	jsonOutput, err := json.Marshal(pro)
	if err != nil {
		return err
	}
	if len(jsonOutput) > MaxContainerTerminationMessageLength {
		return errTooLong
	}
	// O_TRUNC ensures stale bytes from a longer previous message cannot
	// survive past the end of the new payload and corrupt the JSON.
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err = f.Write(jsonOutput); err != nil {
		return err
	}
	return f.Sync()
}
// MessageLengthError indicate the length of termination message of container is beyond 4096 which is the max length read by kubernetes
type MessageLengthError string

const (
	// errTooLong is returned by WriteMessage when the JSON-encoded results
	// exceed MaxContainerTerminationMessageLength.
	errTooLong MessageLengthError = "Termination message is above max allowed size 4096, caused by large task result."
)

// Error implements the error interface for MessageLengthError.
func (e MessageLengthError) Error() string {
	return string(e)
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tracing
import (
"context"
"encoding/base64"
"fmt"
"net/url"
"github.com/tektoncd/pipeline/pkg/apis/config"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/noop"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
listerv1 "k8s.io/client-go/listers/core/v1"
"knative.dev/pkg/system"
)
// tracerProvider wraps an OpenTelemetry TracerProvider whose backing
// implementation can be swapped at runtime as tracing configuration and
// credentials change.
type tracerProvider struct {
	embedded.TracerProvider
	// service is the service name reported on exported spans.
	service string
	// provider is the current delegate; a noop provider until tracing is enabled.
	provider trace.TracerProvider
	// cfg is the most recently applied tracing configuration.
	cfg *config.Tracing
	// username and password are basic-auth credentials read from the configured secret.
	username string
	password string
	logger *zap.SugaredLogger
}
// init installs the W3C TraceContext propagator as the global OpenTelemetry
// text-map propagator so trace context is carried across process boundaries.
func init() {
	otel.SetTextMapPropagator(propagation.TraceContext{})
}
// New returns a new instance of tracerProvider for the given service
func New(service string, logger *zap.SugaredLogger) *tracerProvider {
	// Start with a no-op provider; OnStore swaps in a real one once tracing
	// configuration arrives.
	tp := &tracerProvider{
		service:  service,
		logger:   logger,
		provider: noop.NewTracerProvider(),
	}
	return tp
}
// OnStore configures tracerProvider dynamically
func (t *tracerProvider) OnStore(lister listerv1.SecretLister) func(name string, value interface{}) {
	return func(name string, value interface{}) {
		if name != config.GetTracingConfigName() {
			return
		}
		cfg, ok := value.(*config.Tracing)
		if !ok {
			// Errorf (not Error) so the %v verb is actually interpolated.
			t.logger.Errorf("tracing configmap is in invalid format. value: %v", value)
			return
		}
		if cfg.Equals(t.cfg) {
			t.logger.Info("tracing config unchanged", cfg, t.cfg)
			return
		}
		t.cfg = cfg
		// Load basic-auth credentials from the referenced secret when configured;
		// otherwise clear any previously loaded credentials.
		if lister != nil && cfg.CredentialsSecret != "" {
			sec, err := lister.Secrets(system.Namespace()).Get(cfg.CredentialsSecret)
			if err != nil {
				t.logger.Errorf("unable to initialize tracing with error : %v", err.Error())
				return
			}
			creds := sec.Data
			t.username = string(creds["username"])
			t.password = string(creds["password"])
		} else {
			t.username = ""
			t.password = ""
		}
		t.reinitialize()
	}
}
// Tracer returns a tracer from the currently active delegate provider.
func (t *tracerProvider) Tracer(name string, options ...trace.TracerOption) trace.Tracer {
	return t.provider.Tracer(name, options...)
}
// Handler is called by the informer when the secret is updated
func (t *tracerProvider) Handler(obj interface{}) {
	// The informer hands us interface{}; anything other than a Secret is a bug.
	if secret, ok := obj.(*corev1.Secret); ok {
		t.OnSecret(secret)
		return
	}
	t.logger.Error("Failed to do type assertion for Secret")
}
// OnSecret reloads tracing credentials when the configured credentials secret
// changes, reinitializing the provider if the values actually differ.
func (t *tracerProvider) OnSecret(secret *corev1.Secret) {
	// Guard against the secret informer firing before any tracing config has
	// been stored; t.cfg would otherwise be a nil pointer and panic below.
	if t.cfg == nil || secret.Name != t.cfg.CredentialsSecret {
		return
	}
	creds := secret.Data
	username := string(creds["username"])
	password := string(creds["password"])
	if t.username == username && t.password == password {
		// No change in credentials, no need to reinitialize
		return
	}
	t.username = username
	t.password = password
	t.logger.Debugf("tracing credentials updated, reinitializing tracingprovider with secret: %v", secret.Name)
	t.reinitialize()
}
// reinitialize builds a fresh TracerProvider from the current config and
// credentials, shuts down the previous SDK-backed provider (flushing any
// buffered spans), and swaps the new one in.
func (t *tracerProvider) reinitialize() {
	tp, err := createTracerProvider(t.service, t.cfg, t.username, t.password)
	if err != nil {
		t.logger.Errorf("unable to initialize tracing with error : %v", err.Error())
		return
	}
	t.logger.Info("initialized Tracer Provider")
	// Only SDK providers need shutting down; the noop provider does not.
	if p, ok := t.provider.(*tracesdk.TracerProvider); ok {
		if err := p.Shutdown(context.Background()); err != nil {
			t.logger.Errorf("unable to shutdown tracingprovider with error : %v", err.Error())
		}
	}
	t.provider = tp
}
// createTracerProvider builds a TracerProvider for the given service from cfg.
// When tracing is disabled it returns a noop provider; otherwise it configures
// an OTLP/HTTP exporter, optionally adding basic-auth credentials.
func createTracerProvider(service string, cfg *config.Tracing, user, pass string) (trace.TracerProvider, error) {
	if !cfg.Enabled {
		return noop.NewTracerProvider(), nil
	}
	endpoint, err := url.Parse(cfg.Endpoint)
	if err != nil {
		return nil, err
	}
	opts := []otlptracehttp.Option{
		otlptracehttp.WithEndpoint(endpoint.Host),
		otlptracehttp.WithURLPath(endpoint.Path),
	}
	// Plain http endpoints must opt out of TLS explicitly.
	if endpoint.Scheme == "http" {
		opts = append(opts, otlptracehttp.WithInsecure())
	}
	// Attach basic-auth credentials when both parts are present.
	if user != "" && pass != "" {
		token := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", user, pass)))
		opts = append(opts, otlptracehttp.WithHeaders(map[string]string{
			"Authorization": "Basic " + token,
		}))
	}
	exporter, err := otlptracehttp.New(context.Background(), opts...)
	if err != nil {
		return nil, err
	}
	// Batch spans to the OTLP exporter and tag them with the service name.
	tp := tracesdk.NewTracerProvider(
		tracesdk.WithBatcher(exporter),
		tracesdk.WithResource(resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceNameKey.String(service),
		)),
	)
	return tp, nil
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package verifier
import (
"context"
"crypto"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/sigstore/sigstore/pkg/signature/kms"
// TODO(#5976): consider move these registration to cmd/controller/main.go
_ "github.com/sigstore/sigstore/pkg/signature/kms/aws" // imported to execute init function to register aws kms
_ "github.com/sigstore/sigstore/pkg/signature/kms/azure" // imported to execute init function to register azure kms
_ "github.com/sigstore/sigstore/pkg/signature/kms/gcp" // imported to execute init function to register gcp kms
_ "github.com/sigstore/sigstore/pkg/signature/kms/hashivault" // imported to execute init function to register hashivault kms
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
// keyReference is the prefix of secret reference
keyReference = "k8s://"
)
// FromPolicy get all verifiers from VerificationPolicy.
// For each policy, loop the Authorities of the VerificationPolicy to fetch public key
// from either inline Data or from a SecretRef.
func FromPolicy(ctx context.Context, k8s kubernetes.Interface, policy *v1alpha1.VerificationPolicy) ([]signature.Verifier, error) {
	verifiers := []signature.Verifier{}
	for _, authority := range policy.Spec.Authorities {
		algorithm, err := matchHashAlgorithm(authority.Key.HashAlgorithm)
		if err != nil {
			return nil, fmt.Errorf("authority %q contains an invalid hash algorithm: %w", authority.Name, err)
		}
		// Exactly one key source is used per authority: inline data, a
		// referenced secret, or a KMS URI.
		switch {
		case authority.Key.Data != "":
			v, err := fromData([]byte(authority.Key.Data), algorithm)
			if err != nil {
				return nil, fmt.Errorf("failed to get verifier from data: %w", err)
			}
			verifiers = append(verifiers, v)
		case authority.Key.SecretRef != nil:
			ref := fmt.Sprintf("%s%s/%s", keyReference, authority.Key.SecretRef.Namespace, authority.Key.SecretRef.Name)
			v, err := fromSecret(ctx, ref, algorithm, k8s)
			if err != nil {
				return nil, fmt.Errorf("failed to get verifier from secret: %w", err)
			}
			verifiers = append(verifiers, v)
		case authority.Key.KMS != "":
			v, err := kms.Get(ctx, authority.Key.KMS, algorithm)
			if err != nil {
				return nil, fmt.Errorf("failed to get verifier from kms: %w", err)
			}
			verifiers = append(verifiers, v)
		default:
			return nil, ErrEmptyKey
		}
	}
	if len(verifiers) == 0 {
		return verifiers, ErrEmptyPublicKeys
	}
	return verifiers, nil
}
// fromKeyRef parses the given keyRef, loads the key and returns an appropriate
// verifier using the provided hash algorithm
func fromKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto.Hash, k8s kubernetes.Interface) (signature.Verifier, error) {
	// k8s:// references are resolved via the cluster; anything else is
	// treated as a local file path.
	if strings.HasPrefix(keyRef, keyReference) {
		v, err := fromSecret(ctx, keyRef, hashAlgorithm, k8s)
		if err != nil {
			return nil, fmt.Errorf("failed to get verifier from secret: %w", err)
		}
		return v, nil
	}
	// The redundant `var raw []byte` pre-declaration was removed; ReadFile's
	// short variable declaration is sufficient.
	raw, err := os.ReadFile(filepath.Clean(keyRef))
	if err != nil {
		return nil, fmt.Errorf("%w: %v", ErrFailedLoadKeyFile, err) //nolint:errorlint
	}
	v, err := fromData(raw, hashAlgorithm)
	if err != nil {
		return nil, fmt.Errorf("failed to get verifier from data: %w", err)
	}
	return v, nil
}
// fromSecret fetches the public key from SecretRef and returns the verifier
// hashAlgorithm is provided to determine the hash algorithm of the key
func fromSecret(ctx context.Context, secretRef string, hashAlgorithm crypto.Hash, k8s kubernetes.Interface) (signature.Verifier, error) {
	// Anything not shaped like k8s://<namespace>/<name> is rejected up front.
	if !strings.HasPrefix(secretRef, keyReference) {
		return nil, fmt.Errorf("%w: secretRef %v is invalid", ErrK8sSpecificationInvalid, secretRef)
	}
	s, err := getKeyPairSecret(ctx, secretRef, k8s)
	if err != nil {
		return nil, fmt.Errorf("failed to get secret: %w", err)
	}
	// only 1 public key should be in the secret
	switch {
	case len(s.Data) == 0:
		return nil, fmt.Errorf("secret %q contains no data %w", secretRef, ErrEmptySecretData)
	case len(s.Data) > 1:
		return nil, fmt.Errorf("secret %q contains multiple data entries, only one is supported. %w", secretRef, ErrMultipleSecretData)
	}
	for _, raw := range s.Data {
		v, err := fromData(raw, hashAlgorithm)
		if err != nil {
			return nil, fmt.Errorf("failed to get verifier from secret data: %w", err)
		}
		return v, nil
	}
	// Unreachable: the single data entry above always returns.
	return nil, fmt.Errorf("%w: secretRef %v is invalid", ErrK8sSpecificationInvalid, secretRef)
}
// fromData fetches the public key from raw data and returns the verifier
func fromData(raw []byte, hashAlgorithm crypto.Hash) (signature.Verifier, error) {
	publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(raw)
	if err != nil {
		return nil, fmt.Errorf("%w: %v", ErrDecodeKey, err) //nolint:errorlint
	}
	verifier, err := signature.LoadVerifier(publicKey, hashAlgorithm)
	if err != nil {
		return nil, fmt.Errorf("%w: %v", ErrLoadVerifier, err) //nolint:errorlint
	}
	return verifier, nil
}
// getKeyPairSecret fetches the secret from a k8sRef
// TODO(#5884): use a secret lister to fetch secrets
func getKeyPairSecret(ctx context.Context, k8sRef string, k8s kubernetes.Interface) (*v1.Secret, error) {
	// Expected layout: k8s://<namespace>/<name>
	parts := strings.Split(strings.TrimPrefix(k8sRef, keyReference), "/")
	if len(parts) != 2 {
		return nil, ErrK8sSpecificationInvalid
	}
	s, err := k8s.CoreV1().Secrets(parts[0]).Get(ctx, parts[1], metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("%w: %v", ErrSecretNotFound, err) //nolint:errorlint
	}
	return s, nil
}
// matchHashAlgorithm returns a crypto.Hash code using an algorithm name as input parameter
func matchHashAlgorithm(algorithmName v1alpha1.HashAlgorithm) (crypto.Hash, error) {
	// The lookup is case-insensitive: normalize before consulting the table.
	key := v1alpha1.HashAlgorithm(strings.ToLower(string(algorithmName)))
	if algo, ok := v1alpha1.SupportedSignatureAlgorithms[key]; ok {
		return algo, nil
	}
	return crypto.SHA256, fmt.Errorf("%w: %s", ErrAlgorithmInvalid, algorithmName)
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package trustedresources
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"regexp"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/trustedresources/verifier"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/apis"
"knative.dev/pkg/logging"
)
const (
	// SignatureAnnotation is the key of signature in annotation map
	SignatureAnnotation = "tekton.dev/signature"
	// ConditionTrustedResourcesVerified specifies that the resources pass trusted resources verification or not.
	ConditionTrustedResourcesVerified apis.ConditionType = "TrustedResourcesVerified"
)

// Verification outcomes stored in VerificationResult.VerificationResultType,
// in increasing order of severity.
const (
	// VerificationSkip means no verification was performed.
	VerificationSkip = iota
	// VerificationPass means verification succeeded.
	VerificationPass
	// VerificationWarn means verification did not succeed but only produced a warning.
	VerificationWarn
	// VerificationError means verification failed.
	VerificationError
)

// Hashable is implemented by resources that can produce a checksum of
// themselves for signature verification.
type Hashable interface {
	// Checksum returns the resource's checksum bytes, or an error.
	Checksum() ([]byte, error)
}

// VerificationResultType indicates different cases of a verification result
type VerificationResultType int

// VerificationResult contains the type and message about the result of verification
type VerificationResult struct {
	// VerificationResultType has 4 types which is corresponding to 4 cases:
	// 0 (VerificationSkip): The verification was skipped. Err is nil in this case.
	// 1 (VerificationPass): The verification passed. Err is nil in this case.
	// 2 (VerificationWarn): A warning is logged. It could be no matching policies and feature flag "no-match-policy" is "warn", or only Warn mode verification policies fail.
	// 3 (VerificationError): The verification failed, it could be the signature doesn't match the public key, no matching policies and "no-match-policy" is set to "fail" or there are errors during verification.
	VerificationResultType VerificationResultType
	// Err contains the error message when there is a warning logged or error returned.
	Err error
}
// VerifyResource verifies the signature and public key against resource (v1beta1 and v1 task and pipeline).
// VerificationResult is returned with different types for different cases:
// 1) Return VerificationResult with VerificationSkip type, when no policies are found and no-match-policy is set to ignore
// 2) Return VerificationResult with VerificationPass type when verification passed;
// 3) Return VerificationResult with VerificationWarn type, when no matching policies and feature flag "no-match-policy" is "warn", or only Warn mode verification policies fail. Err field is filled with the warning;
// 4) Return VerificationResult with VerificationError type when no policies are found and no-match-policy is set to fail, the resource fails to pass matched enforce verification policy, or there are errors during verification. Err is filled with the err.
// refSource contains the source information of the resource.
func VerifyResource(ctx context.Context, resource metav1.Object, k8s kubernetes.Interface, refSource *v1.RefSource, verificationpolicies []*v1alpha1.VerificationPolicy) VerificationResult {
	var refSourceURI string
	if refSource != nil {
		refSourceURI = refSource.URI
	}
	matchedPolicies, err := getMatchedPolicies(resource.GetName(), refSourceURI, verificationpolicies)
	if err != nil {
		if errors.Is(err, ErrNoMatchedPolicies) {
			switch config.GetVerificationNoMatchPolicy(ctx) {
			case config.IgnoreNoMatchPolicy:
				return VerificationResult{VerificationResultType: VerificationSkip}
			case config.WarnNoMatchPolicy:
				logger := logging.FromContext(ctx)
				warning := fmt.Errorf("failed to get matched policies: %w", err)
				// Use a constant format string: the wrapped error can contain
				// user-supplied '%' characters (e.g. from policy patterns),
				// which Warnf would otherwise misinterpret as verbs.
				logger.Warnf("%s", warning)
				return VerificationResult{VerificationResultType: VerificationWarn, Err: warning}
			}
		}
		return VerificationResult{VerificationResultType: VerificationError, Err: fmt.Errorf("failed to get matched policies: %w", err)}
	}
	signature, err := extractSignature(resource)
	if err != nil {
		return VerificationResult{VerificationResultType: VerificationError, Err: err}
	}
	return verifyResource(ctx, resource, k8s, signature, matchedPolicies)
}
// VerifyTask verifies a v1beta1 Task; it is kept for backward compatibility.
//
// Deprecated: use VerifyResource instead.
func VerifyTask(ctx context.Context, taskObj *v1beta1.Task, k8s kubernetes.Interface, refSource *v1.RefSource, verificationpolicies []*v1alpha1.VerificationPolicy) VerificationResult {
	return VerifyResource(ctx, taskObj, k8s, refSource, verificationpolicies)
}
// VerifyPipeline verifies a v1beta1 Pipeline; it is kept for backward compatibility.
//
// Deprecated: use VerifyResource instead.
func VerifyPipeline(ctx context.Context, pipelineObj *v1beta1.Pipeline, k8s kubernetes.Interface, refSource *v1.RefSource, verificationpolicies []*v1alpha1.VerificationPolicy) VerificationResult {
	return VerifyResource(ctx, pipelineObj, k8s, refSource, verificationpolicies)
}
// getMatchedPolicies filters out the policies by checking if the resource url (source) is matching any of the `patterns` in the `resources` list.
func getMatchedPolicies(resourceName string, source string, policies []*v1alpha1.VerificationPolicy) ([]*v1alpha1.VerificationPolicy, error) {
	matched := []*v1alpha1.VerificationPolicy{}
	for _, policy := range policies {
		for _, res := range policy.Spec.Resources {
			ok, err := regexp.MatchString(res.Pattern, source)
			if err != nil {
				// FixMe: changing %v to %w breaks integration tests.
				return matched, fmt.Errorf("%v: %w", err, ErrRegexMatch) //nolint:errorlint
			}
			if ok {
				// One matching pattern is enough; move on to the next policy.
				matched = append(matched, policy)
				break
			}
		}
	}
	if len(matched) == 0 {
		return matched, fmt.Errorf("%w: no matching policies are found for resource: %s against source: %s", ErrNoMatchedPolicies, resourceName, source)
	}
	return matched, nil
}
// verifyResource verifies resource which implements metav1.Object by provided signature and public keys from verification policies.
// For matched policies, `verifyResource` will adopt the following rules to do verification:
// 1. If multiple policies match, the resource must satisfy all the "enforce" policies to pass verification. The matching "enforce" policies are evaluated using AND logic.
// Alternatively, if the resource only matches policies in "warn" mode, it will still pass verification and only log a warning if these policies are not satisfied.
// 2. To pass one policy, the resource can pass any public keys in the policy. We use OR logic on public keys of one policy.
//
// TODO(#6683): return all failed policies in error.
func verifyResource(ctx context.Context, resource metav1.Object, k8s kubernetes.Interface, signature []byte, matchedPolicies []*v1alpha1.VerificationPolicy) VerificationResult {
	logger := logging.FromContext(ctx)
	var warnPolicies []*v1alpha1.VerificationPolicy
	var enforcePolicies []*v1alpha1.VerificationPolicy
	for _, p := range matchedPolicies {
		if p.Spec.Mode == v1alpha1.ModeWarn {
			warnPolicies = append(warnPolicies, p)
		} else {
			enforcePolicies = append(enforcePolicies, p)
		}
	}
	// get the checksum of the resource
	checksumBytes, err := getChecksum(resource)
	if err != nil {
		return VerificationResult{VerificationResultType: VerificationError, Err: err}
	}
	// first evaluate all enforce policies. Return VerificationError type of VerificationResult if any policy fails.
	for _, p := range enforcePolicies {
		verifiers, err := verifier.FromPolicy(ctx, k8s, p)
		if err != nil {
			return VerificationResult{VerificationResultType: VerificationError, Err: fmt.Errorf("failed to get verifiers from policy: %w", err)}
		}
		if !doesAnyVerifierPass(ctx, checksumBytes, signature, verifiers) {
			return VerificationResult{VerificationResultType: VerificationError, Err: fmt.Errorf("%w: resource %s in namespace %s fails verification", ErrResourceVerificationFailed, resource.GetName(), resource.GetNamespace())}
		}
	}
	// then evaluate all warn policies. Return VerificationWarn type of VerificationResult if any warn policies fails.
	for _, p := range warnPolicies {
		verifiers, err := verifier.FromPolicy(ctx, k8s, p)
		if err != nil {
			warn := fmt.Errorf("failed to get verifiers for resource %s from namespace %s: %w", resource.GetName(), resource.GetNamespace(), err)
			// Constant format string: the wrapped error may contain '%' runes
			// that Warnf would otherwise treat as formatting verbs.
			logger.Warnf("%s", warn)
			return VerificationResult{VerificationResultType: VerificationWarn, Err: warn}
		}
		if !doesAnyVerifierPass(ctx, checksumBytes, signature, verifiers) {
			warn := fmt.Errorf("%w: resource %s in namespace %s fails verification", ErrResourceVerificationFailed, resource.GetName(), resource.GetNamespace())
			logger.Warnf("%s", warn)
			return VerificationResult{VerificationResultType: VerificationWarn, Err: warn}
		}
	}
	return VerificationResult{VerificationResultType: VerificationPass}
}
// doesAnyVerifierPass reports whether at least one of the given verifiers
// successfully verifies signature against checksumBytes. Every verifier that
// fails is logged as a warning before the next one is tried.
func doesAnyVerifierPass(ctx context.Context, checksumBytes []byte, signature []byte, verifiers []signature.Verifier) bool {
	logger := logging.FromContext(ctx)
	for _, v := range verifiers {
		err := v.VerifySignature(bytes.NewReader(signature), bytes.NewReader(checksumBytes))
		if err == nil {
			// A single passing verifier is enough for the policy to pass.
			return true
		}
		// FixMe: changing %v to %w breaks integration tests.
		warn := fmt.Errorf("%w:%v", ErrResourceVerificationFailed, err.Error())
		logger.Warnf(warn.Error())
	}
	return false
}
// extractSignature extracts the signature if it is present in the metadata.
// A missing annotation is not an error: both return values are nil.
// Returns a non-nil error if the signature cannot be decoded.
func extractSignature(in metav1.Object) ([]byte, error) {
	// The signature is expected in a well-known annotation, base64-encoded.
	encoded, found := in.GetAnnotations()[SignatureAnnotation]
	if !found {
		return nil, nil
	}
	return base64.StdEncoding.DecodeString(encoded)
}
// getChecksum gets the sha256 checksum of the resource.
// Returns a non-nil error if the checksum cannot be computed or the resource is of unknown type.
func getChecksum(resource metav1.Object) ([]byte, error) {
	h, ok := resource.(Hashable)
	if !ok {
		return nil, fmt.Errorf("%w: resource %T is not a Hashable type", ErrResourceNotSupported, resource)
	}
	// Checksum already returns ([]byte, error); no need to unpack and repack it.
	return h.Checksum()
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workspace
import (
"context"
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pkgnames "github.com/tektoncd/pipeline/pkg/names"
"github.com/tektoncd/pipeline/pkg/substitution"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
	// volumeNameBase is the prefix for all generated workspace volume names.
	volumeNameBase = "ws"
	// defaultRandomLength is the suffix length passed to
	// pkgnames.GenerateHashedName when building volume names — presumably the
	// number of hash characters appended; confirm against that helper.
	defaultRandomLength = 5
)
// nameVolumeMap is a map from a workspace's name to its Volume.
type nameVolumeMap map[string]corev1.Volume

// setVolumeSource assigns a volume to a workspace's name, constructing a
// corev1.Volume with the given volume name and source.
func (nvm nameVolumeMap) setVolumeSource(workspaceName string, volumeName string, source corev1.VolumeSource) {
	nvm[workspaceName] = corev1.Volume{
		Name:         volumeName,
		VolumeSource: source,
	}
}
// generateVolumeName generates a unique name for a volume based on the workspace name.
// The result is the "ws" base plus a hashed suffix derived from name (see
// pkgnames.GenerateHashedName); the same input yields the same output.
func generateVolumeName(name string) string {
	return pkgnames.GenerateHashedName(volumeNameBase, name, defaultRandomLength)
}
// CreateVolumes will return a dictionary where the keys are the names of the workspaces bound in
// wb and the value is a newly-created Volume to use. If the same Volume is bound twice, the
// resulting volumes will both have the same name to prevent the same Volume from being attached
// to a pod twice. The names of the returned volumes will be a short hash string starting "ws-".
func CreateVolumes(wb []v1.WorkspaceBinding) map[string]corev1.Volume {
	// pvcs tracks PVC-backed volumes by claim name so a claim bound to several
	// workspaces is attached to the pod only once.
	pvcs := map[string]corev1.Volume{}
	v := make(nameVolumeMap, len(wb))
	// Track the names we've used so far to avoid collisions
	usedNames := make(map[string]struct{}, len(wb))
	for _, w := range wb {
		name := generateVolumeName(w.Name)
		// If we've already generated this name, try appending extra characters until we find a unique name
		for _, exists := usedNames[name]; exists; _, exists = usedNames[name] {
			name = generateVolumeName(name + "$")
		}
		// Track the name we've used
		usedNames[name] = struct{}{}
		// Exactly one volume-source field is expected per binding; each source
		// struct is copied so the returned Volume does not alias the caller's
		// binding.
		switch {
		case w.PersistentVolumeClaim != nil:
			// If it's a PVC, we need to check if we've encountered it before so we avoid mounting it twice
			if vv, ok := pvcs[w.PersistentVolumeClaim.ClaimName]; ok {
				v[w.Name] = vv
			} else {
				pvc := *w.PersistentVolumeClaim
				v.setVolumeSource(w.Name, name, corev1.VolumeSource{PersistentVolumeClaim: &pvc})
				pvcs[pvc.ClaimName] = v[w.Name]
			}
		case w.EmptyDir != nil:
			ed := *w.EmptyDir
			v.setVolumeSource(w.Name, name, corev1.VolumeSource{EmptyDir: &ed})
		case w.ConfigMap != nil:
			cm := *w.ConfigMap
			v.setVolumeSource(w.Name, name, corev1.VolumeSource{ConfigMap: &cm})
		case w.Secret != nil:
			s := *w.Secret
			v.setVolumeSource(w.Name, name, corev1.VolumeSource{Secret: &s})
		case w.Projected != nil:
			s := *w.Projected
			v.setVolumeSource(w.Name, name, corev1.VolumeSource{Projected: &s})
		case w.CSI != nil:
			csi := *w.CSI
			v.setVolumeSource(w.Name, name, corev1.VolumeSource{CSI: &csi})
		}
	}
	return v
}
// getDeclaredWorkspace returns a pointer to a copy of the declaration in w whose
// Name matches name. Webhook validation is expected to guarantee the workspace
// exists; the error here is a defensive fallback.
func getDeclaredWorkspace(name string, w []v1.WorkspaceDeclaration) (*v1.WorkspaceDeclaration, error) {
	for i := range w {
		if w[i].Name == name {
			decl := w[i]
			return &decl, nil
		}
	}
	return nil, fmt.Errorf("even though validation should have caught it, bound workspace %s did not exist in declared workspaces", name)
}
// Apply will update the StepTemplate, Sidecars and Volumes declaration in ts so that the workspaces
// specified through wb combined with the declared workspaces in ts will be available for
// all Step and Sidecar containers in the resulting pod.
//
// ts is taken by value and the modified copy is returned; v maps workspace
// names to the Volumes produced by CreateVolumes. ctx is currently unused in
// this body.
func Apply(ctx context.Context, ts v1.TaskSpec, wb []v1.WorkspaceBinding, v map[string]corev1.Volume) (*v1.TaskSpec, error) {
	// If there are no bound workspaces, we don't need to do anything
	if len(wb) == 0 {
		return &ts, nil
	}
	// addedVolumes prevents appending the same Volume to ts.Volumes twice when
	// several bindings share one volume name.
	addedVolumes := sets.NewString()
	// Initialize StepTemplate if it hasn't been already
	if ts.StepTemplate == nil {
		ts.StepTemplate = &v1.StepTemplate{}
	}
	// A workspace is "isolated" when any Step or Sidecar names it in its own
	// Workspaces list; such workspaces are mounted only where requested instead
	// of into every container.
	isolatedWorkspaces := sets.NewString()
	for _, step := range ts.Steps {
		for _, workspaceUsage := range step.Workspaces {
			isolatedWorkspaces.Insert(workspaceUsage.Name)
		}
	}
	for _, sidecar := range ts.Sidecars {
		for _, workspaceUsage := range sidecar.Workspaces {
			isolatedWorkspaces.Insert(workspaceUsage.Name)
		}
	}
	for i := range wb {
		// Propagate missing Workspaces: a binding without a matching declaration
		// gets an implicit declaration added.
		addWorkspace := true
		for _, ws := range ts.Workspaces {
			if ws.Name == wb[i].Name {
				addWorkspace = false
				break
			}
		}
		if addWorkspace {
			ts.Workspaces = append(ts.Workspaces, v1.WorkspaceDeclaration{Name: wb[i].Name})
		}
		w, err := getDeclaredWorkspace(wb[i].Name, ts.Workspaces)
		if err != nil {
			return nil, err
		}
		// Get the volume we should be using for this binding
		vv := v[wb[i].Name]
		volumeMount := corev1.VolumeMount{
			Name:      vv.Name,
			MountPath: w.GetMountPath(),
			SubPath:   wb[i].SubPath,
			ReadOnly:  w.ReadOnly,
		}
		if isolatedWorkspaces.Has(w.Name) {
			mountAsIsolatedWorkspace(ts, w.Name, volumeMount)
		} else {
			mountAsSharedWorkspace(ts, volumeMount)
		}
		// Only add this volume if it hasn't already been added
		if !addedVolumes.Has(vv.Name) {
			ts.Volumes = append(ts.Volumes, vv)
			addedVolumes.Insert(vv.Name)
		}
	}
	return &ts, nil
}
// mountAsSharedWorkspace takes a volumeMount and adds it to all the steps and sidecars in
// a TaskSpec.
func mountAsSharedWorkspace(ts v1.TaskSpec, volumeMount corev1.VolumeMount) {
ts.StepTemplate.VolumeMounts = append(ts.StepTemplate.VolumeMounts, volumeMount)
for i := range ts.Sidecars {
AddSidecarVolumeMount(&ts.Sidecars[i], volumeMount)
}
}
// mountAsIsolatedWorkspace takes a volumeMount and adds it only to the steps and sidecars
// that have requested access to it.
func mountAsIsolatedWorkspace(ts v1.TaskSpec, workspaceName string, volumeMount corev1.VolumeMount) {
for i := range ts.Steps {
step := &ts.Steps[i]
for _, workspaceUsage := range step.Workspaces {
if workspaceUsage.Name == workspaceName {
vm := volumeMount
if workspaceUsage.MountPath != "" {
vm.MountPath = workspaceUsage.MountPath
}
step.VolumeMounts = append(step.VolumeMounts, vm)
break
}
}
}
for i := range ts.Sidecars {
sidecar := &ts.Sidecars[i]
for _, workspaceUsage := range sidecar.Workspaces {
if workspaceUsage.Name == workspaceName {
vm := volumeMount
if workspaceUsage.MountPath != "" {
vm.MountPath = workspaceUsage.MountPath
}
sidecar.VolumeMounts = append(sidecar.VolumeMounts, vm)
break
}
}
}
}
// AddSidecarVolumeMount is a helper to add a volumeMount to the sidecar unless its
// MountPath would conflict with another of the sidecar's existing volume mounts.
func AddSidecarVolumeMount(sidecar *v1.Sidecar, volumeMount corev1.VolumeMount) {
for j := range sidecar.VolumeMounts {
if sidecar.VolumeMounts[j].MountPath == volumeMount.MountPath {
return
}
}
sidecar.VolumeMounts = append(sidecar.VolumeMounts, volumeMount)
}
func findWorkspaceSubstitutionLocationsInSidecars(sidecars []v1.Sidecar) sets.String {
locationsToCheck := sets.NewString()
for _, sidecar := range sidecars {
locationsToCheck.Insert(sidecar.Script)
for i := range sidecar.Args {
locationsToCheck.Insert(sidecar.Args[i])
}
for i := range sidecar.Command {
locationsToCheck.Insert(sidecar.Command[i])
}
locationsToCheck.Insert(sidecar.WorkingDir)
for _, e := range sidecar.Env {
locationsToCheck.Insert(e.Value)
}
}
return locationsToCheck
}
func findWorkspaceSubstitutionLocationsInSteps(steps []v1.Step) sets.String {
locationsToCheck := sets.NewString()
for _, step := range steps {
locationsToCheck.Insert(step.Script)
for i := range step.Args {
locationsToCheck.Insert(step.Args[i])
}
for i := range step.Command {
locationsToCheck.Insert(step.Command[i])
}
locationsToCheck.Insert(step.WorkingDir)
for _, e := range step.Env {
locationsToCheck.Insert(e.Value)
}
for _, p := range step.Params {
locationsToCheck.Insert(p.Value.ArrayVal...)
for k := range p.Value.ObjectVal {
locationsToCheck.Insert(p.Value.ObjectVal[k])
}
locationsToCheck.Insert(p.Value.StringVal)
}
}
return locationsToCheck
}
func findWorkspaceSubstitutionLocationsInStepTemplate(stepTemplate *v1.StepTemplate) sets.String {
locationsToCheck := sets.NewString()
if stepTemplate != nil {
for i := range stepTemplate.Args {
locationsToCheck.Insert(stepTemplate.Args[i])
}
for i := range stepTemplate.Command {
locationsToCheck.Insert(stepTemplate.Command[i])
}
locationsToCheck.Insert(stepTemplate.WorkingDir)
for _, e := range stepTemplate.Env {
locationsToCheck.Insert(e.Value)
}
}
return locationsToCheck
}
// FindWorkspacesUsedByTask returns a set of all the workspaces that the TaskSpec uses.
func FindWorkspacesUsedByTask(ts v1.TaskSpec) (sets.String, error) {
locationsToCheck := sets.NewString()
locationsToCheck.Insert(findWorkspaceSubstitutionLocationsInSteps(ts.Steps).List()...)
locationsToCheck.Insert(findWorkspaceSubstitutionLocationsInSidecars(ts.Sidecars).List()...)
locationsToCheck.Insert(findWorkspaceSubstitutionLocationsInStepTemplate(ts.StepTemplate).List()...)
workspacesUsedInSteps := sets.NewString()
for item := range locationsToCheck {
workspacesUsed, _, errString := substitution.ExtractVariablesFromString(item, "workspaces")
if errString != "" {
return workspacesUsedInSteps, fmt.Errorf("Error while extracting workspace: %s", errString)
}
workspacesUsedInSteps.Insert(workspacesUsed...)
}
return workspacesUsedInSteps, nil
}
// ReplaceWorkspaceBindingsVars substitutes parameter references in each WorkspaceBinding,
// based on the mapping provided in replacements.
// NOTE: the bindings are modified in place — the returned slice is the same
// slice that was passed in, not a copy.
func ReplaceWorkspaceBindingsVars(wbs []v1.WorkspaceBinding, replacements map[string]string) []v1.WorkspaceBinding {
	for i := range wbs {
		replaceWorkspaceBindingVars(&wbs[i], replacements)
	}
	return wbs
}
// replaceWorkspaceBindingVars substitutes parameter references in a single WorkspaceBinding,
// based on the mapping provided in replacements.
// NOTE: the binding is mutated in place; the same pointer is returned for convenience.
func replaceWorkspaceBindingVars(wb *v1.WorkspaceBinding, replacements map[string]string) *v1.WorkspaceBinding {
	wb.SubPath = substitution.ApplyReplacements(wb.SubPath, replacements)
	if wb.PersistentVolumeClaim != nil {
		wb.PersistentVolumeClaim = applyPersistentVolumeClaimVolumeSource(wb.PersistentVolumeClaim, replacements)
	}
	if wb.ConfigMap != nil {
		wb.ConfigMap = applyConfigMapVolumeSource(wb.ConfigMap, replacements)
	}
	if wb.Secret != nil {
		wb.Secret = applySecretVolumeSource(wb.Secret, replacements)
	}
	if wb.Projected != nil {
		// Projected volumes aggregate multiple sources; substitute within each
		// ConfigMap/Secret projection individually.
		for j, source := range wb.Projected.Sources {
			if source.ConfigMap != nil {
				wb.Projected.Sources[j].ConfigMap = applyConfigMapProjection(wb.Projected.Sources[j].ConfigMap, replacements)
			}
			if source.Secret != nil {
				wb.Projected.Sources[j].Secret = applySecretProjection(wb.Projected.Sources[j].Secret, replacements)
			}
		}
	}
	if wb.CSI != nil {
		wb.CSI = applyCSIVolumeSource(wb.CSI, replacements)
	}
	return wb
}
func applyPersistentVolumeClaimVolumeSource(pvc *corev1.PersistentVolumeClaimVolumeSource,
replacements map[string]string) *corev1.PersistentVolumeClaimVolumeSource {
pvc.ClaimName = substitution.ApplyReplacements(pvc.ClaimName, replacements)
return pvc
}
func applyConfigMapVolumeSource(cm *corev1.ConfigMapVolumeSource, replacements map[string]string) *corev1.ConfigMapVolumeSource {
cm.Name = substitution.ApplyReplacements(cm.Name, replacements)
cm.Items = applyKeyToPathItems(cm.Items, replacements)
return cm
}
func applySecretVolumeSource(s *corev1.SecretVolumeSource, replacements map[string]string) *corev1.SecretVolumeSource {
s.SecretName = substitution.ApplyReplacements(s.SecretName, replacements)
s.Items = applyKeyToPathItems(s.Items, replacements)
return s
}
func applyConfigMapProjection(cm *corev1.ConfigMapProjection, replacements map[string]string) *corev1.ConfigMapProjection {
cm.Name = substitution.ApplyReplacements(cm.Name, replacements)
cm.Items = applyKeyToPathItems(cm.Items, replacements)
return cm
}
func applySecretProjection(s *corev1.SecretProjection, replacements map[string]string) *corev1.SecretProjection {
s.Name = substitution.ApplyReplacements(s.Name, replacements)
s.Items = applyKeyToPathItems(s.Items, replacements)
return s
}
func applyCSIVolumeSource(csi *corev1.CSIVolumeSource, replacements map[string]string) *corev1.CSIVolumeSource {
csi.Driver = substitution.ApplyReplacements(csi.Driver, replacements)
if csi.NodePublishSecretRef != nil {
csi.NodePublishSecretRef.Name = substitution.ApplyReplacements(csi.NodePublishSecretRef.Name, replacements)
}
return csi
}
func applyKeyToPathItems(items []corev1.KeyToPath, replacements map[string]string) []corev1.KeyToPath {
for i := range items {
item := &items[i]
item.Key = substitution.ApplyReplacements(item.Key, replacements)
item.Path = substitution.ApplyReplacements(item.Path, replacements)
}
return items
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workspace
import (
"context"
"errors"
"fmt"
pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// ValidateBindings will return an error if the bound workspaces in binds don't satisfy the declared
// workspaces in decls.
func ValidateBindings(ctx context.Context, decls []v1.WorkspaceDeclaration, binds []v1.WorkspaceBinding) error {
// This will also be validated at webhook time but in case the webhook isn't invoked for some
// reason we'll invoke the same validation here.
for _, b := range binds {
if err := b.Validate(ctx); err != nil {
return pipelineErrors.WrapUserError(fmt.Errorf("binding %q is invalid: %w", b.Name, err))
}
}
declNames := sets.NewString()
bindNames := sets.NewString()
for _, decl := range decls {
declNames.Insert(decl.Name)
}
for _, bind := range binds {
bindNames.Insert(bind.Name)
}
for _, decl := range decls {
if decl.Optional {
continue
}
if !bindNames.Has(decl.Name) {
return pipelineErrors.WrapUserError(fmt.Errorf("declared workspace %q is required but has not been bound", decl.Name))
}
}
for _, bind := range binds {
if !declNames.Has(bind.Name) {
return pipelineErrors.WrapUserError(fmt.Errorf("workspace binding %q does not match any declared workspace", bind.Name))
}
}
return nil
}
// ValidateOnlyOnePVCIsUsed checks that a list of WorkspaceBinding uses only one
// persistent volume claim.
//
// This is only useful to validate that WorkspaceBindings in TaskRuns are compatible
// with affinity rules enforced by the AffinityAssistant.
func ValidateOnlyOnePVCIsUsed(wb []v1.WorkspaceBinding) error {
workspaceVolumes := make(map[string]bool)
for _, w := range wb {
if w.PersistentVolumeClaim != nil {
workspaceVolumes[w.PersistentVolumeClaim.ClaimName] = true
}
if w.VolumeClaimTemplate != nil {
workspaceVolumes[w.Name] = true
}
}
if len(workspaceVolumes) > 1 {
return pipelineErrors.WrapUserError(errors.New("more than one PersistentVolumeClaim is bound"))
}
return nil
}
/*
Copyright 2025 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This utility can be used to generate a new release name in the format:
<cat breed> <robot name>
to be used for a Tekton Pipelines release.
It looks for cat breeds from CatAPIURL and it parses robot names out
of the Wikipedia page RobotWikiURL. It filters out names that have already been used,
based on the GitHub API GitHubReleasesURL.
To use, run:
go run release_names.go
Example output:
{
"release_name": "California Spangled Clank",
"cat_breed_url": "https://en.wikipedia.org/wiki/California_Spangled",
"robot_url": "https://en.wikipedia.org/wiki/Clank"
}
*/
package main
import (
	"context"
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math/big"
	"net/http"
	"regexp"
	"strings"
	"time"
)
// API Endpoints
const (
	// CatAPIURL returns the list of cat breeds as JSON (TheCatAPI).
	CatAPIURL = "https://api.thecatapi.com/v1/breeds"
	// RobotWikiURL is the Wikipedia page scraped for fictional robot names.
	RobotWikiURL = "https://en.wikipedia.org/wiki/List_of_fictional_robots_and_androids"
	// WikiURL is the base URL used to build the links emitted in the output.
	WikiURL = "https://en.wikipedia.org/wiki/"
	// GitHubReleasesURL is the GitHub API endpoint listing past pipeline releases.
	GitHubReleasesURL = "https://api.github.com/repos/tektoncd/pipeline/releases"
)
// Structs to hold API responses

// CatBreed is the subset of TheCatAPI breed object we decode (only the name).
type CatBreed struct {
	Name string `json:"name"`
}

// Release is the subset of a GitHub release object we decode (only the name).
type Release struct {
	Name string `json:"name"`
}
// httpGet issues a GET request to url with a descriptive User-Agent header.
// A client timeout is set so a stalled endpoint cannot hang the program
// indefinitely (http.DefaultClient has no timeout). The caller is responsible
// for closing the response body.
func httpGet(url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "golang-tekton-bot/0.1")
	client := &http.Client{Timeout: 30 * time.Second}
	return client.Do(req)
}
// getCatBreeds fetches cat breeds from TheCatAPI and organizes them by first
// letter (uppercased). Non-200 responses are reported as errors instead of
// being fed to the JSON decoder.
func getCatBreeds() (map[string][]string, error) {
	resp, err := httpGet(CatAPIURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d fetching cat breeds", resp.StatusCode)
	}
	var breeds []CatBreed
	if err := json.NewDecoder(resp.Body).Decode(&breeds); err != nil {
		return nil, err
	}
	catDict := make(map[string][]string)
	for _, breed := range breeds {
		if breed.Name == "" {
			// Guard: indexing an empty name would panic.
			continue
		}
		firstLetter := strings.ToUpper(string(breed.Name[0]))
		catDict[firstLetter] = append(catDict[firstLetter], breed.Name)
	}
	return catDict, nil
}
// getRobotNames scrapes the Wikipedia list of fictional robots and returns the
// names organized by first letter (uppercased). Non-200 responses are reported
// as errors, and captures that trim to an empty string are skipped so indexing
// name[0] cannot panic.
func getRobotNames() (map[string][]string, error) {
	resp, err := httpGet(RobotWikiURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d fetching robot names", resp.StatusCode)
	}
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	robotDict := make(map[string][]string)
	// Regex to extract robot names from <li><b><a ...>Robot Name</a></b>
	re := regexp.MustCompile(`<li>\s*<b>\s*<a[^>]*>([^<]+)</a>\s*</b>`)
	for _, match := range re.FindAllStringSubmatch(string(bodyBytes), -1) {
		if len(match) < 2 {
			continue
		}
		name := strings.TrimSpace(match[1])
		if name == "" {
			// Guard: an all-whitespace capture would make name[0] panic.
			continue
		}
		firstLetter := strings.ToUpper(string(name[0]))
		robotDict[firstLetter] = append(robotDict[firstLetter], name)
	}
	return robotDict, nil
}
// Fetch past releases from GitHub
func getPastReleases() (map[string]bool, error) {
pastReleases := make(map[string]bool)
page := 1
perPage := 100
// Loop until we get an page smaller than perPage (or empty)
for {
url := fmt.Sprintf("%s?per_page=%d&page=%d", GitHubReleasesURL, perPage, page)
resp, err := httpGet(url)
if err != nil {
return nil, fmt.Errorf("failed to fetch releases page %d: %w", page, err)
}
defer resp.Body.Close()
var pageReleases []Release
if err := json.NewDecoder(resp.Body).Decode(&pageReleases); err != nil {
return nil, fmt.Errorf("failed to fetch releases page %d: %w", page, err)
}
// If we got an empty page, we've reached the end
if len(pageReleases) == 0 {
break
}
pastReleases := make(map[string]bool)
for _, release := range pageReleases {
pastReleases[release.Name] = true
}
// If we got fewer than the requested number, we've reached the end
if len(pageReleases) < perPage {
break
}
page++
}
return pastReleases, nil
}
// randomElement returns a uniformly random element of array using crypto/rand.
// Returns an error for an empty slice: rand.Int panics when its max argument
// is not positive, so the previous version panicked on empty input.
func randomElement(array []string) (string, error) {
	if len(array) == 0 {
		return "", errors.New("cannot pick a random element of an empty slice")
	}
	n, err := rand.Int(rand.Reader, big.NewInt(int64(len(array))))
	if err != nil {
		return "", err
	}
	return array[n.Int64()], nil
}
// Generate a unique release name.
// generateUniqueTuple picks a random (cat breed, robot name) pair that share a
// first letter, retrying up to maxAttempts times until the combined
// "<cat> <robot>" name has not been used by a past release.
func generateUniqueTuple() (string, string, error) {
	catBreeds, err := getCatBreeds()
	if err != nil {
		return "", "", err
	}
	robotNames, err := getRobotNames()
	if err != nil {
		return "", "", err
	}
	pastReleases, err := getPastReleases()
	if err != nil {
		return "", "", err
	}
	// Find common letters: only letters with at least one breed AND one robot
	// can produce a pair.
	commonLetters := []string{}
	for letter := range catBreeds {
		if _, exists := robotNames[letter]; exists {
			commonLetters = append(commonLetters, letter)
		}
	}
	if len(commonLetters) == 0 {
		return "", "", errors.New("no matching names found")
	}
	maxAttempts := 10
	for range maxAttempts {
		chosenLetter, err := randomElement(commonLetters)
		if err != nil {
			return "", "", err
		}
		cat, err := randomElement(catBreeds[chosenLetter])
		if err != nil {
			return "", "", err
		}
		robot, err := randomElement(robotNames[chosenLetter])
		if err != nil {
			return "", "", err
		}
		newName := cat + " " + robot
		// Accept the pair only if this exact release name is unused.
		if !pastReleases[newName] {
			return cat, robot, nil
		}
	}
	return "", "", errors.New("could not generate a unique name after multiple attempts")
}
// printJsonError prints err to stdout as a JSON object. The message is
// marshalled with encoding/json so quotes or control characters in the error
// text cannot corrupt the output (the previous string concatenation produced
// invalid JSON for messages containing `"`).
func printJsonError(err error) {
	out, merr := json.Marshal(map[string]string{"error": err.Error()})
	if merr != nil {
		// Fall back to a minimal, hand-built object; the original message is
		// lost but the output stays valid JSON.
		fmt.Println(`{"error": "failed to marshal error"}`) //nolint:forbidigo
		return
	}
	fmt.Println(string(out)) //nolint:forbidigo
}
// main generates a release name and prints it, together with Wikipedia links
// for both halves, as indented JSON on stdout. Errors are also emitted as JSON
// so the output is always machine-readable.
func main() {
	cat, robot, err := generateUniqueTuple()
	if err != nil {
		printJsonError(err)
		return
	}
	output := map[string]string{
		"release_name":  cat + " " + robot,
		"cat_breed_url": WikiURL + strings.ReplaceAll(cat, " ", "_"),
		"robot_url":     WikiURL + strings.ReplaceAll(robot, " ", "_"),
	}
	jsonOutput, err := json.MarshalIndent(output, "", " ")
	if err != nil {
		printJsonError(err)
		return
	}
	fmt.Println(string(jsonOutput)) //nolint:forbidigo
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"context"
"fmt"
"io"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/test/logging"
)
// CollectPodLogs will get the logs for all containers in a Pod and emit them
// through the supplied test logger. Fetch failures are logged rather than
// returned; in that case the "build logs" line is still emitted (with an
// empty logs string).
func CollectPodLogs(ctx context.Context, c *clients, podName, namespace string, logf logging.FormatLogger) {
	logs, err := getContainersLogsFromPod(ctx, c.KubeClient, podName, namespace)
	if err != nil {
		logf("Could not get logs for pod %s: %s", podName, err)
	}
	logf("build logs %s", logs)
}
// getContainersLogsFromPod concatenates the logs of every container in the
// named pod, each preceded by a ">>> Pod ... Container ..." banner.
func getContainersLogsFromPod(ctx context.Context, c kubernetes.Interface, pod, namespace string) (string, error) {
	p, err := c.CoreV1().Pods(namespace).Get(ctx, pod, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var sb strings.Builder
	for _, container := range p.Spec.Containers {
		fmt.Fprintf(&sb, "\n>>> Pod %s Container %s:\n", p.Name, container.Name)
		logs, err := getContainerLogsFromPod(ctx, c, pod, container.Name, namespace)
		if err != nil {
			return "", err
		}
		sb.WriteString(logs)
	}
	return sb.String(), nil
}
// getContainerLogsFromPod returns the full log output of one container in the
// named pod.
func getContainerLogsFromPod(ctx context.Context, c kubernetes.Interface, pod, container, namespace string) (string, error) {
	// Do not follow, which will block until the Pod terminates, and potentially deadlock the test.
	// If done in the wrong order, this could actually block things and prevent the Pod from being
	// deleted at all.
	req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: false, Container: container})
	rc, err := req.Stream(ctx)
	if err != nil {
		return "", err
	}
	// Close the log stream so its underlying connection is released
	// (the previous version leaked it).
	defer rc.Close()
	bs, err := io.ReadAll(rc)
	if err != nil {
		return "", err
	}
	return string(bs), nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Get access to client objects
To initialize client objects you can use the setup function. It returns a clients struct
that contains initialized clients for accessing:
- Kubernetes objects
- Pipelines (https://github.com/tektoncd/pipeline#pipeline)
For example, to create a Pipeline
_, err = clients.V1beta1PipelineClient.Create(test.Pipeline(namespaceName, pipelineName))
And you can use the client to clean up resources created by your test
func tearDown(clients *test.Clients) {
if clients != nil {
clients.Delete([]string{routeName}, []string{configName})
}
}
*/
package test
import (
"testing"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
v1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
resolutionversioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1"
resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1"
apixclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"
knativetest "knative.dev/pkg/test"
)
// clients holds instances of interfaces for making requests to the Pipeline controllers.
// Fields are grouped by API version: v1beta1 and v1 Tekton resource clients,
// plus resolution-request clients for both resolution API versions.
// NOTE(review): V1alpha1ResolutionRequestclient / V1beta1ResolutionRequestclient
// use a lowercase "client" suffix, inconsistent with the other fields; renaming
// would break existing callers, so it is only flagged here.
type clients struct {
	KubeClient kubernetes.Interface
	// ApixClient is the apiextensions client (e.g. for CRD lookups).
	ApixClient                       apixclient.Interface
	V1beta1PipelineClient            v1beta1.PipelineInterface
	V1beta1TaskClient                v1beta1.TaskInterface
	V1beta1TaskRunClient             v1beta1.TaskRunInterface
	V1beta1PipelineRunClient         v1beta1.PipelineRunInterface
	V1beta1CustomRunClient           v1beta1.CustomRunInterface
	V1alpha1ResolutionRequestclient  resolutionv1alpha1.ResolutionRequestInterface
	V1alpha1VerificationPolicyClient v1alpha1.VerificationPolicyInterface
	V1PipelineClient                 v1.PipelineInterface
	V1TaskClient                     v1.TaskInterface
	V1TaskRunClient                  v1.TaskRunInterface
	V1PipelineRunClient              v1.PipelineRunInterface
	V1beta1StepActionClient          v1beta1.StepActionInterface
	V1beta1ResolutionRequestclient   resolutionv1beta1.ResolutionRequestInterface
}
// newClients instantiates and returns several clientsets required for making requests to the
// Pipeline cluster specified by the combination of clusterName and configPath. Clients can
// make requests within namespace. Any failure to build a client aborts the
// test immediately via t.Fatalf.
func newClients(t *testing.T, configPath, clusterName, namespace string) *clients {
	t.Helper()
	var err error
	c := &clients{}

	// Build the shared rest.Config all clientsets are derived from.
	cfg, err := knativetest.BuildClientConfig(configPath, clusterName)
	if err != nil {
		t.Fatalf("failed to create configuration obj from %s for cluster %s: %s", configPath, clusterName, err)
	}

	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		t.Fatalf("failed to create kubeclient from config file at %s: %s", configPath, err)
	}
	c.KubeClient = kubeClient

	apixClient, err := apixclient.NewForConfig(cfg)
	if err != nil {
		t.Fatalf("failed to create apixclient from config file at %s: %s", configPath, err)
	}
	c.ApixClient = apixClient

	// cs serves the Tekton pipeline API groups; rrcs serves resolution requests.
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err)
	}
	rrcs, err := resolutionversioned.NewForConfig(cfg)
	if err != nil {
		t.Fatalf("failed to create resolution clientset from config file at %s: %s", configPath, err)
	}

	// All typed clients are scoped to the given namespace.
	c.V1beta1PipelineClient = cs.TektonV1beta1().Pipelines(namespace)
	c.V1beta1TaskClient = cs.TektonV1beta1().Tasks(namespace)
	c.V1beta1TaskRunClient = cs.TektonV1beta1().TaskRuns(namespace)
	c.V1beta1PipelineRunClient = cs.TektonV1beta1().PipelineRuns(namespace)
	c.V1beta1CustomRunClient = cs.TektonV1beta1().CustomRuns(namespace)
	c.V1alpha1ResolutionRequestclient = rrcs.ResolutionV1alpha1().ResolutionRequests(namespace)
	c.V1alpha1VerificationPolicyClient = cs.TektonV1alpha1().VerificationPolicies(namespace)
	c.V1PipelineClient = cs.TektonV1().Pipelines(namespace)
	c.V1TaskClient = cs.TektonV1().Tasks(namespace)
	c.V1TaskRunClient = cs.TektonV1().TaskRuns(namespace)
	c.V1PipelineRunClient = cs.TektonV1().PipelineRuns(namespace)
	c.V1beta1StepActionClient = cs.TektonV1beta1().StepActions(namespace)
	c.V1beta1ResolutionRequestclient = rrcs.ResolutionV1beta1().ResolutionRequests(namespace)
	return c
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"context"
"fmt"
"sync/atomic"
"testing"
// Link in the fakes so they get injected into injection.Fake
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
informersv1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1"
informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1"
informersv1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake"
fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipeline/fake"
fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/fake"
faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task/fake"
faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/fake"
fakeverificationpolicyinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/fake"
fakecustomruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/fake"
fakestepactioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction/fake"
fakeresolutionclientset "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake"
resolutioninformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1"
fakeresolutionrequestclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake"
fakeresolutionrequestinformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest/fake"
cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
coreinformers "k8s.io/client-go/informers/core/v1"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
ktesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake"
fakeconfigmapinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake"
fakelimitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake"
fakefilteredpodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered/fake"
fakesecretinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/secret/fake"
fakeserviceaccountinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount/fake"
"knative.dev/pkg/controller"
"knative.dev/pkg/system"
)
// Data represents the desired state of the system (i.e. existing resources) to seed controllers
// with. SeedTestData creates every resource listed here in the corresponding fake
// clientset and informer cache.
type Data struct {
	PipelineRuns []*v1.PipelineRun
	Pipelines    []*v1.Pipeline
	TaskRuns     []*v1.TaskRun
	Tasks        []*v1.Task
	StepActions  []*v1beta1.StepAction
	CustomRuns   []*v1beta1.CustomRun
	Pods         []*corev1.Pod
	Namespaces   []*corev1.Namespace
	ConfigMaps   []*corev1.ConfigMap
	ServiceAccounts    []*corev1.ServiceAccount
	LimitRange         []*corev1.LimitRange
	ResolutionRequests []*resolutionv1alpha1.ResolutionRequest
	// ExpectedCloudEventCount is not consumed by SeedTestData itself;
	// presumably individual tests read it to verify emitted events — confirm with callers.
	ExpectedCloudEventCount int
	VerificationPolicies    []*v1alpha1.VerificationPolicy
	Secrets                 []*corev1.Secret
}
// Clients holds references to clients which are useful for reconciler tests.
type Clients struct {
	// Pipeline is the fake Tekton pipeline clientset.
	Pipeline *fakepipelineclientset.Clientset
	// Kube is the fake Kubernetes core clientset.
	Kube *fakekubeclientset.Clientset
	// CloudEvents is the fake cloud-events client.
	CloudEvents cloudeventclient.CEClient
	// ResolutionRequests is the fake resolution clientset.
	ResolutionRequests *fakeresolutionclientset.Clientset
}
// Informers holds references to informers which are useful for reconciler tests.
// SeedTestData wires reactors so that mutations through the fake clientsets are
// mirrored into these informers' indexers.
type Informers struct {
	PipelineRun informersv1.PipelineRunInformer
	Pipeline    informersv1.PipelineInformer
	TaskRun     informersv1.TaskRunInformer
	// Run is declared but not populated by SeedTestData in this file.
	Run                informersv1alpha1.RunInformer
	CustomRun          informersv1beta1.CustomRunInformer
	Task               informersv1.TaskInformer
	StepAction         informersv1beta1.StepActionInformer
	Pod                coreinformers.PodInformer
	ConfigMap          coreinformers.ConfigMapInformer
	ServiceAccount     coreinformers.ServiceAccountInformer
	LimitRange         coreinformers.LimitRangeInformer
	ResolutionRequest  resolutioninformersv1alpha1.ResolutionRequestInformer
	VerificationPolicy informersv1alpha1.VerificationPolicyInformer
	Secret             coreinformers.SecretInformer
}
// Assets holds references to the controller, logs, clients, and informers.
type Assets struct {
	Logger     *zap.SugaredLogger
	Controller *controller.Impl
	Clients    Clients
	Informers  Informers
	Recorder   *record.FakeRecorder
	Ctx        context.Context
}
// AddToInformer returns a client-go reaction function that mirrors create and
// update actions into the given informer cache store. Updates additionally
// simulate the API server's optimistic-concurrency check: a mismatched
// resourceVersion yields a Conflict error. The reactor never "handles" the
// action itself (except on error), so the fake clientset's default behavior
// still runs afterwards.
func AddToInformer(t *testing.T, store cache.Store) func(ktesting.Action) (bool, runtime.Object, error) {
	t.Helper()
	return func(action ktesting.Action) (bool, runtime.Object, error) {
		switch act := action.(type) {
		case ktesting.CreateActionImpl:
			if err := store.Add(act.GetObject()); err != nil {
				t.Fatal(err)
			}
		case ktesting.UpdateActionImpl:
			obj := act.GetObject()
			newMeta, err := meta.Accessor(obj)
			if err != nil {
				return true, nil, err
			}
			// Fetch the currently-stored copy for the concurrency check.
			key := newMeta.GetNamespace() + "/" + newMeta.GetName()
			prev, found, err := store.GetByKey(key)
			if err != nil {
				return true, nil, err
			}
			if !found {
				// Not in the cache; let the fake client produce the error.
				return false, nil, nil
			}
			prevMeta, err := meta.Accessor(prev)
			if err != nil {
				return true, nil, err
			}
			// Mismatched resourceVersion fails with a Conflict, like a real API server.
			if prevMeta.GetResourceVersion() != newMeta.GetResourceVersion() {
				return true, nil, apierrs.NewConflict(
					act.Resource.GroupResource(), newMeta.GetName(),
					fmt.Errorf("resourceVersion mismatch, got: %v, wanted: %v",
						newMeta.GetResourceVersion(), prevMeta.GetResourceVersion()))
			}
			// Versions agree: accept the update into the cache.
			if err := store.Update(obj); err != nil {
				t.Fatal(err)
			}
		}
		return false, nil, nil
	}
}
// SeedTestData returns Clients and Informers populated with the
// given Data. For each resource kind it first prepends a reactor that mirrors
// mutations into the matching informer cache, then creates every seed object
// through the fake clientset (which triggers that reactor). Recorded actions
// are cleared at the end so tests observe only their own calls.
//
//nolint:revive
func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers) {
	t.Helper()
	c := Clients{
		Kube:               fakekubeclient.Get(ctx),
		Pipeline:           fakepipelineclient.Get(ctx),
		CloudEvents:        cloudeventclient.Get(ctx),
		ResolutionRequests: fakeresolutionrequestclient.Get(ctx),
	}
	// Every time a resource is modified, change the metadata.resourceVersion.
	PrependResourceVersionReactor(&c.Pipeline.Fake)
	i := Informers{
		PipelineRun:        fakepipelineruninformer.Get(ctx),
		Pipeline:           fakepipelineinformer.Get(ctx),
		TaskRun:            faketaskruninformer.Get(ctx),
		CustomRun:          fakecustomruninformer.Get(ctx),
		Task:               faketaskinformer.Get(ctx),
		StepAction:         fakestepactioninformer.Get(ctx),
		Pod:                fakefilteredpodinformer.Get(ctx, v1.ManagedByLabelKey),
		ConfigMap:          fakeconfigmapinformer.Get(ctx),
		ServiceAccount:     fakeserviceaccountinformer.Get(ctx),
		LimitRange:         fakelimitrangeinformer.Get(ctx),
		ResolutionRequest:  fakeresolutionrequestinformer.Get(ctx),
		VerificationPolicy: fakeverificationpolicyinformer.Get(ctx),
		Secret:             fakesecretinformer.Get(ctx),
	}
	// Attach reactors that add resource mutations to the appropriate
	// informer index, and simulate optimistic concurrency failures when
	// the resource version is mismatched.
	c.Pipeline.PrependReactor("*", "pipelineruns", AddToInformer(t, i.PipelineRun.Informer().GetIndexer()))
	for _, pr := range d.PipelineRuns {
		pr := pr.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Pipeline.TektonV1().PipelineRuns(pr.Namespace).Create(ctx, pr, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Pipeline.PrependReactor("*", "pipelines", AddToInformer(t, i.Pipeline.Informer().GetIndexer()))
	for _, p := range d.Pipelines {
		p := p.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Pipeline.TektonV1().Pipelines(p.Namespace).Create(ctx, p, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Pipeline.PrependReactor("*", "taskruns", AddToInformer(t, i.TaskRun.Informer().GetIndexer()))
	for _, tr := range d.TaskRuns {
		tr := tr.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Pipeline.TektonV1().TaskRuns(tr.Namespace).Create(ctx, tr, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Pipeline.PrependReactor("*", "tasks", AddToInformer(t, i.Task.Informer().GetIndexer()))
	for _, ta := range d.Tasks {
		ta := ta.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Pipeline.TektonV1().Tasks(ta.Namespace).Create(ctx, ta, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Pipeline.PrependReactor("*", "stepactions", AddToInformer(t, i.StepAction.Informer().GetIndexer()))
	for _, sa := range d.StepActions {
		sa := sa.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Pipeline.TektonV1beta1().StepActions(sa.Namespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Pipeline.PrependReactor("*", "customruns", AddToInformer(t, i.CustomRun.Informer().GetIndexer()))
	for _, customRun := range d.CustomRuns {
		customRun := customRun.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Pipeline.TektonV1beta1().CustomRuns(customRun.Namespace).Create(ctx, customRun, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Kube.PrependReactor("*", "pods", AddToInformer(t, i.Pod.Informer().GetIndexer()))
	for _, p := range d.Pods {
		p := p.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	// Namespaces have no informer here, so they are created without a reactor.
	for _, n := range d.Namespaces {
		n := n.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Kube.CoreV1().Namespaces().Create(ctx, n, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Kube.PrependReactor("*", "configmaps", AddToInformer(t, i.ConfigMap.Informer().GetIndexer()))
	for _, cm := range d.ConfigMaps {
		cm := cm.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Kube.CoreV1().ConfigMaps(cm.Namespace).Create(ctx, cm, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Kube.PrependReactor("*", "serviceaccounts", AddToInformer(t, i.ServiceAccount.Informer().GetIndexer()))
	for _, sa := range d.ServiceAccounts {
		sa := sa.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Kube.CoreV1().ServiceAccounts(sa.Namespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.ResolutionRequests.PrependReactor("*", "resolutionrequests", AddToInformer(t, i.ResolutionRequest.Informer().GetIndexer()))
	for _, rr := range d.ResolutionRequests {
		rr := rr.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.ResolutionRequests.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Create(ctx, rr, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Pipeline.PrependReactor("*", "verificationpolicies", AddToInformer(t, i.VerificationPolicy.Informer().GetIndexer()))
	for _, vp := range d.VerificationPolicies {
		vp := vp.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Pipeline.TektonV1alpha1().VerificationPolicies(vp.Namespace).Create(ctx, vp, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	c.Kube.PrependReactor("*", "secrets", AddToInformer(t, i.Secret.Informer().GetIndexer()))
	for _, s := range d.Secrets {
		s := s.DeepCopy() // Avoid assumptions that the informer's copy is modified.
		if _, err := c.Kube.CoreV1().Secrets(s.Namespace).Create(ctx, s, metav1.CreateOptions{}); err != nil {
			t.Fatal(err)
		}
	}
	// Drop the actions recorded while seeding so tests only see their own.
	c.Pipeline.ClearActions()
	c.Kube.ClearActions()
	c.ResolutionRequests.ClearActions()
	return c, i
}
// ResourceVersionReactor is an implementation of Reactor for our tests.
// It stamps a monotonically increasing resourceVersion onto every created or
// updated object, simulating the API server's behavior.
type ResourceVersionReactor struct {
	// count is incremented atomically for every mutation and used as the
	// next resourceVersion value.
	count int64
}
// Handles reports whether this reactor handles the given action. It always
// answers false so the rest of the reaction chain still runs, but — as a
// deliberate side effect — it stamps a fresh, zero-padded resourceVersion
// onto the object of every create or update action it sees.
func (r *ResourceVersionReactor) Handles(action ktesting.Action) bool {
	bump := func(obj runtime.Object) {
		accessor, err := meta.Accessor(obj)
		if err != nil {
			// Objects without standard metadata are left untouched.
			return
		}
		next := atomic.AddInt64(&r.count, 1)
		accessor.SetResourceVersion(fmt.Sprintf("%05d", next))
	}
	switch act := action.(type) {
	case ktesting.CreateActionImpl:
		bump(act.GetObject())
	case ktesting.UpdateActionImpl:
		bump(act.GetObject())
	}
	return false
}
// React is a noop-function: the resourceVersion mutation happens as a side
// effect of Handles, so React never claims the action and always lets the
// remaining reactors process it.
func (r *ResourceVersionReactor) React(action ktesting.Action) (handled bool, ret runtime.Object, err error) {
	return false, nil, nil
}
// Compile-time assertion that ResourceVersionReactor satisfies ktesting.Reactor.
var _ ktesting.Reactor = (*ResourceVersionReactor)(nil)

// PrependResourceVersionReactor will instrument a client-go testing Fake
// with a reactor that simulates resourceVersion changes on mutations.
// This does not work with patches.
func PrependResourceVersionReactor(f *ktesting.Fake) {
	// Build a new chain with the reactor in front so it sees every action
	// before the fake's default reactors do.
	chain := make([]ktesting.Reactor, 0, len(f.ReactionChain)+1)
	chain = append(chain, &ResourceVersionReactor{})
	chain = append(chain, f.ReactionChain...)
	f.ReactionChain = chain
}
// EnsureConfigurationConfigMapsExist makes sure all the configmaps exists:
// for each configuration ConfigMap the controllers read (defaults,
// feature-flags, metrics, spire, events, tracing, wait-exponential-backoff)
// that is not already present in d.ConfigMaps, an empty ConfigMap in the
// system namespace is appended.
func EnsureConfigurationConfigMapsExist(d *Data) {
	// All required configuration ConfigMap names, in the same order the
	// missing ones are appended.
	required := []string{
		config.GetDefaultsConfigName(),
		config.GetFeatureFlagsConfigName(),
		config.GetMetricsConfigName(),
		config.GetSpireConfigName(),
		config.GetEventsConfigName(),
		config.GetTracingConfigName(),
		config.GetWaitExponentialBackoffConfigName(),
	}
	// Index the seeded ConfigMaps by name (namespace is intentionally
	// ignored, matching the original name-only checks).
	seen := make(map[string]bool, len(d.ConfigMaps))
	for _, cm := range d.ConfigMaps {
		seen[cm.Name] = true
	}
	for _, name := range required {
		if !seen[name] {
			d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: system.Namespace()},
				Data:       map[string]string{},
			})
		}
	}
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diff
// PrintWantGot takes a diff string generated by cmp.Diff and returns it
// in a consistent format for reuse across all of our tests. This
// func assumes that the order of arguments passed to cmp.Diff was
// (want, got) or, in other words, the expectedResult then the actualResult.
func PrintWantGot(diff string) string {
	const prefix = "(-want, +got): "
	return prefix + diff
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"strings"
"testing"
"github.com/tektoncd/pipeline/pkg/apis/config"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/system"
)
// requireAnyGate returns a setup func that will skip the current
// test if none of the feature-flags in the given map match
// what's in the feature-flags ConfigMap (checking both the pipelines
// and the resolvers namespaces). It will fatally fail
// the test if it cannot get the feature-flag configmap.
func requireAnyGate(gates map[string]string) func(context.Context, *testing.T, *clients, string) {
	return func(ctx context.Context, t *testing.T, c *clients, namespace string) {
		t.Helper()
		featureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, config.GetFeatureFlagsConfigName(), metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err)
		}
		resolverFeatureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(resolverconfig.ResolversNamespace(system.Namespace())).
			Get(ctx, resolverconfig.GetFeatureFlagsConfigName(), metav1.GetOptions{})
		// A missing resolver configmap is tolerated; any other error is fatal.
		if err != nil && !errors.IsNotFound(err) {
			t.Fatalf("Failed to get ConfigMap `%s`: %s", resolverconfig.GetFeatureFlagsConfigName(), err)
		}
		resolverFlags := make(map[string]string)
		if resolverFeatureFlagsCM != nil {
			resolverFlags = resolverFeatureFlagsCM.Data
		}
		unmatched := []string{}
		for flag, want := range gates {
			// A single match in either configmap is enough to run the test.
			if got, found := featureFlagsCM.Data[flag]; found && got == want {
				return
			}
			if got, found := resolverFlags[flag]; found && got == want {
				return
			}
			unmatched = append(unmatched, fmt.Sprintf("%q: %q", flag, want))
		}
		t.Skipf("No feature flag in namespace %q matching %s\nExisting feature flag: %#v\nExisting resolver feature flag (in namespace %q): %#v",
			system.Namespace(), strings.Join(unmatched, " or "), featureFlagsCM.Data,
			resolverconfig.ResolversNamespace(system.Namespace()), resolverFlags)
	}
}
// requireAllGates returns a setup func that will skip the current
// test unless every feature-flag in the given map matches
// what's in the feature-flags ConfigMap. A flag absent from the pipelines
// configmap falls back to the resolvers configmap. It will fatally fail
// the test if it cannot get the feature-flag configmap.
func requireAllGates(gates map[string]string) func(context.Context, *testing.T, *clients, string) {
	return func(ctx context.Context, t *testing.T, c *clients, namespace string) {
		t.Helper()
		featureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, config.GetFeatureFlagsConfigName(), metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err)
		}
		resolverFeatureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(resolverconfig.ResolversNamespace(system.Namespace())).
			Get(ctx, resolverconfig.GetFeatureFlagsConfigName(), metav1.GetOptions{})
		// A missing resolver configmap is tolerated; any other error is fatal.
		if err != nil && !errors.IsNotFound(err) {
			t.Fatalf("Failed to get ConfigMap `%s`: %s", resolverconfig.GetFeatureFlagsConfigName(), err)
		}
		resolverFlags := make(map[string]string)
		if resolverFeatureFlagsCM != nil {
			resolverFlags = resolverFeatureFlagsCM.Data
		}
		mismatches := []string{}
		for flag, want := range gates {
			got, found := featureFlagsCM.Data[flag]
			if !found {
				// Not in the pipelines configmap: the resolver configmap must match.
				got, found = resolverFlags[flag]
				if !found || want != got {
					mismatches = append(mismatches, fmt.Sprintf("%q is %q, want %s", flag, got, want))
				}
				continue
			}
			if want != got {
				mismatches = append(mismatches, fmt.Sprintf("%q is %q, want %s", flag, got, want))
			}
		}
		if len(mismatches) > 0 {
			t.Skipf("One or more feature flags not matching required: %s", strings.Join(mismatches, "; "))
		}
	}
}
// getFeatureFlagsBaseOnAPIFlag builds a *config.FeatureFlags matching the
// cluster's current "enable-api-fields" gate (read via getAPIFeatureGate).
// All three tiers are constructed up front so that any invalid flag map
// fails the test regardless of which tier is selected; unknown gate values
// fall back to the stable flags.
func getFeatureFlagsBaseOnAPIFlag(t *testing.T) *config.FeatureFlags {
	t.Helper()
	// Flags enabled when the cluster runs with alpha API fields.
	alphaFeatureFlags, err := config.NewFeatureFlagsFromMap(map[string]string{
		"enable-api-fields":              "alpha",
		"results-from":                   "sidecar-logs",
		"enable-tekton-oci-bundles":      "true",
		"enable-cel-in-whenexpression":   "true",
		"enable-param-enum":              "true",
		"enable-artifacts":               "true",
		"enable-concise-resolver-syntax": "true",
		"enable-kubernetes-sidecar":      "true",
		"keep-pod-on-cancel":             "true",
	})
	if err != nil {
		t.Fatalf("error creating alpha feature flags configmap: %v", err)
	}
	// Flags enabled when the cluster runs with beta API fields.
	betaFeatureFlags, err := config.NewFeatureFlagsFromMap(map[string]string{
		"results-from":       "sidecar-logs",
		"enable-api-fields":  "beta",
		"keep-pod-on-cancel": "true",
	})
	if err != nil {
		t.Fatalf("error creating beta feature flags configmap: %v", err)
	}
	// Default flags for stable clusters.
	stableFeatureFlags, err := config.NewFeatureFlagsFromMap(map[string]string{
		"enable-api-fields": "stable",
	})
	if err != nil {
		t.Fatalf("error creating stable feature flags configmap: %v", err)
	}
	enabledFeatureGate, err := getAPIFeatureGate()
	if err != nil {
		t.Fatalf("error reading enabled feature gate: %v", err)
	}
	switch enabledFeatureGate {
	case "alpha":
		return alphaFeatureFlags
	case "beta":
		return betaFeatureFlags
	default:
		// Anything else (including "stable") maps to the stable flags.
		return stableFeatureFlags
	}
}
// getAPIFeatureGate queries the tekton pipelines namespace for the
// current value of the "enable-api-fields" feature gate by shelling out to
// kubectl. The namespace comes from SYSTEM_NAMESPACE, defaulting to
// "tekton-pipelines"; an empty or unset gate is reported as "stable".
func getAPIFeatureGate() (string, error) {
	namespace := os.Getenv("SYSTEM_NAMESPACE")
	if namespace == "" {
		namespace = "tekton-pipelines"
	}
	out, err := exec.Command("kubectl", "get", "configmap", "feature-flags", "-n", namespace, "-o", `jsonpath="{.data['enable-api-fields']}"`).Output()
	if err != nil {
		return "", fmt.Errorf("error getting feature-flags configmap: %w", err)
	}
	// The jsonpath template wraps the value in double quotes; strip
	// whitespace first, then the quotes.
	gate := string(bytes.Trim(bytes.TrimSpace(out), "\""))
	if gate == "" {
		gate = "stable"
	}
	return gate, nil
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"log"
"net/http"
)
// handler logs every incoming request and writes a fixed greeting in response.
func handler(w http.ResponseWriter, r *http.Request) {
	log.Print("Hello world received a request.")
	fmt.Fprint(w, "Hello World! \n")
}
// main starts a minimal HTTP server that answers every path with the
// hello-world handler, listening on port 8080 until an error occurs.
func main() {
	log.Print("Hello world sample started.")
	http.HandleFunc("/", handler)
	//nolint: gosec
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		panic(err)
	}
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"bytes"
"os/exec"
"regexp"
)
var (
	// defaultNamespaceRE matches the literal text "namespace: default" in a
	// manifest; presumably used to rewrite seeded YAML into a test-specific
	// namespace — confirm against callers outside this chunk.
	defaultNamespaceRE = regexp.MustCompile("namespace: default")
)
// kubectlCreate pipes the given manifest bytes to `kubectl create` in the
// given namespace and returns the command's combined stdout/stderr.
func kubectlCreate(input []byte, namespace string) ([]byte, error) {
	create := exec.Command("kubectl", "create", "-n", namespace, "-f", "-")
	create.Stdin = bytes.NewReader(input)
	return create.CombinedOutput()
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"os"
"runtime"
"testing"
"k8s.io/apimachinery/pkg/util/sets"
)
var (
	// imageNames maps the image-id constants below to arch-appropriate references.
	imageNames = initImageNames()
	// excludedTests lists test names skipped on the current architecture.
	excludedTests = initExcludedTests()
)
const (
	// Busybox image with specific sha
	busyboxImage = iota
	// Registry image
	registryImage
	// Kaniko executor image (the original comment said "kubectl", which
	// did not match the constant)
	kanikoImage
	// dockerize image
	dockerizeImage
)
// getTestArch returns the architecture of the cluster where test suites will
// be executed. It defaults to the build architecture (runtime.GOARCH); set
// TEST_RUNTIME_ARCH when the target cluster has a different architecture.
func getTestArch() string {
	if arch, ok := os.LookupEnv("TEST_RUNTIME_ARCH"); ok {
		return arch
	}
	return runtime.GOARCH
}
// initImageNames returns the map with arch dependent image names for e2e tests
func initImageNames() map[int]string {
switch getTestArch() {
case "s390x":
return map[int]string{
busyboxImage: "mirror.gcr.io/busybox@sha256:2f9af5cf39068ec3a9e124feceaa11910c511e23a1670dcfdff0bc16793545fb",
registryImage: "mirror.gcr.io/ibmcom/registry:2.6.2.5",
kanikoImage: "gcr.io/kaniko-project/executor:s390x-9ed158c1f63a059cde4fd5f8b95af51d452d9aa7",
dockerizeImage: "mirror.gcr.io/ibmcom/dockerize-s390x",
}
case "ppc64le":
return map[int]string{
busyboxImage: "mirror.gcr.io/busybox@sha256:2f9af5cf39068ec3a9e124feceaa11910c511e23a1670dcfdff0bc16793545fb",
registryImage: "mirror.gcr.io/ppc64le/registry:2",
kanikoImage: "mirror.gcr.io/ibmcom/kaniko-project-executor-ppc64le:v0.17.1",
dockerizeImage: "mirror.gcr.io/ibmcom/dockerize-ppc64le",
}
default:
return map[int]string{
busyboxImage: "mirror.gcr.io/busybox@sha256:2f9af5cf39068ec3a9e124feceaa11910c511e23a1670dcfdff0bc16793545fb",
registryImage: "mirror.gcr.io/library/registry",
kanikoImage: "gcr.io/kaniko-project/executor:v1.3.0",
dockerizeImage: "mirror.gcr.io/jwilder/dockerize",
}
}
}
// initExcludedTests provides the list of excluded tests for e2e and examples
// tests on the current architecture; non-s390x/ppc64le architectures exclude
// nothing.
func initExcludedTests() sets.String {
	var excluded []string
	switch getTestArch() {
	case "s390x":
		excluded = []string{
			// Git resolver test using local Gitea instance
			"TestGitResolver_API",
			// examples
			"TestExamples/v1alpha1/taskruns/gcs-resource",
			"TestExamples/v1beta1/taskruns/gcs-resource",
			"TestExamples/v1beta1/taskruns/creds-init-only-mounts-provided-credentials",
		}
	case "ppc64le":
		excluded = []string{
			// Git resolver test using local Gitea instance
			"TestGitResolver_API",
			// examples
			"TestExamples/v1alpha1/taskruns/gcs-resource",
			"TestExamples/v1beta1/taskruns/gcs-resource",
		}
	}
	return sets.NewString(excluded...)
}
// getTestImage gets the arch-appropriate test image reference for the given
// image-id constant (busyboxImage, registryImage, kanikoImage, dockerizeImage).
func getTestImage(image int) string {
	return imageNames[image]
}
// skipIfExcluded skips the current test when its name appears in the
// architecture-specific exclusion list.
func skipIfExcluded(t *testing.T) {
	t.Helper()
	if !excludedTests.Has(t.Name()) {
		return
	}
	t.Skipf("skip for %s architecture", getTestArch())
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package names
import (
utilrand "k8s.io/apimachinery/pkg/util/rand"
)
// TestingSeed used to set the random name generator to a fixed seed for
// testing, making generated resource names deterministic across runs.
func TestingSeed() {
	utilrand.Seed(12345)
}
/*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parse
import (
"testing"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// MustParseV1alpha1StepAction takes YAML and parses it into a
// *v1alpha1.StepAction, prepending the apiVersion and kind; it fails the
// test on parse errors.
func MustParseV1alpha1StepAction(t *testing.T, yaml string) *v1alpha1.StepAction {
	t.Helper()
	stepAction := v1alpha1.StepAction{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1alpha1\nkind: StepAction\n"+yaml, &stepAction)
	return &stepAction
}
// MustParseV1beta1StepAction takes YAML and parses it into a
// *v1beta1.StepAction (the original comment incorrectly said v1alpha1),
// prepending the apiVersion and kind; it fails the test on parse errors.
func MustParseV1beta1StepAction(t *testing.T, yaml string) *v1beta1.StepAction {
	t.Helper()
	stepAction := v1beta1.StepAction{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1beta1\nkind: StepAction\n"+yaml, &stepAction)
	return &stepAction
}
// MustParseV1beta1TaskRun takes YAML and parses it into a *v1beta1.TaskRun,
// prepending the apiVersion and kind; it fails the test on parse errors.
func MustParseV1beta1TaskRun(t *testing.T, yaml string) *v1beta1.TaskRun {
	t.Helper()
	taskRun := v1beta1.TaskRun{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1beta1\nkind: TaskRun\n"+yaml, &taskRun)
	return &taskRun
}
// MustParseV1TaskRun takes YAML and parses it into a *v1.TaskRun,
// prepending the apiVersion and kind; it fails the test on parse errors.
func MustParseV1TaskRun(t *testing.T, yaml string) *v1.TaskRun {
	t.Helper()
	taskRun := v1.TaskRun{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1\nkind: TaskRun\n"+yaml, &taskRun)
	return &taskRun
}
// MustParseV1beta1Task takes YAML and parses it into a *v1beta1.Task,
// prepending the apiVersion and kind; it fails the test on parse errors.
func MustParseV1beta1Task(t *testing.T, yaml string) *v1beta1.Task {
	t.Helper()
	parsed := v1beta1.Task{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1beta1\nkind: Task\n"+yaml, &parsed)
	return &parsed
}
// MustParseV1beta1TaskAndSetDefaults takes YAML, parses it into a
// *v1beta1.Task, and applies the API defaults before returning it.
func MustParseV1beta1TaskAndSetDefaults(t *testing.T, yaml string) *v1beta1.Task {
	t.Helper()
	parsed := MustParseV1beta1Task(t, yaml)
	parsed.SetDefaults(t.Context())
	return parsed
}
// MustParseCustomRun takes YAML and parses it into a *v1beta1.CustomRun,
// prepending the apiVersion and kind; it fails the test on parse errors.
func MustParseCustomRun(t *testing.T, yaml string) *v1beta1.CustomRun {
	t.Helper()
	customRun := v1beta1.CustomRun{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1beta1\nkind: CustomRun\n"+yaml, &customRun)
	return &customRun
}
// MustParseV1Task takes YAML and parses it into a *v1.Task,
// prepending the apiVersion and kind; it fails the test on parse errors.
func MustParseV1Task(t *testing.T, yaml string) *v1.Task {
	t.Helper()
	parsed := v1.Task{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1\nkind: Task\n"+yaml, &parsed)
	return &parsed
}
// MustParseV1TaskAndSetDefaults takes YAML, parses it into a *v1.Task, and
// applies the API defaults before returning it.
func MustParseV1TaskAndSetDefaults(t *testing.T, yaml string) *v1.Task {
	t.Helper()
	parsed := MustParseV1Task(t, yaml)
	parsed.SetDefaults(t.Context())
	return parsed
}
// MustParseV1beta1PipelineRun takes YAML and parses it into a
// *v1beta1.PipelineRun, prepending the apiVersion and kind; it fails the
// test on parse errors.
func MustParseV1beta1PipelineRun(t *testing.T, yaml string) *v1beta1.PipelineRun {
	t.Helper()
	pipelineRun := v1beta1.PipelineRun{}
	mustParseYAML(t, "apiVersion: tekton.dev/v1beta1\nkind: PipelineRun\n"+yaml, &pipelineRun)
	return &pipelineRun
}
// MustParseV1PipelineRun takes YAML and parses it into a *v1.PipelineRun
func MustParseV1PipelineRun(t *testing.T, yaml string) *v1.PipelineRun {
	t.Helper()
	run := v1.PipelineRun{}
	// Prepend the fixed apiVersion/kind header so callers only supply the spec.
	withHeader := `apiVersion: tekton.dev/v1
kind: PipelineRun
` + yaml
	mustParseYAML(t, withHeader, &run)
	return &run
}
// MustParseV1beta1Pipeline takes YAML and parses it into a *v1beta1.Pipeline
func MustParseV1beta1Pipeline(t *testing.T, yaml string) *v1beta1.Pipeline {
	t.Helper()
	parsed := v1beta1.Pipeline{}
	// Prepend the fixed apiVersion/kind header so callers only supply the spec.
	withHeader := `apiVersion: tekton.dev/v1beta1
kind: Pipeline
` + yaml
	mustParseYAML(t, withHeader, &parsed)
	return &parsed
}
// MustParseV1beta1PipelineAndSetDefaults takes YAML and parses it into a *v1beta1.Pipeline and sets defaults
func MustParseV1beta1PipelineAndSetDefaults(t *testing.T, yaml string) *v1beta1.Pipeline {
	t.Helper()
	parsed := MustParseV1beta1Pipeline(t, yaml)
	// Apply API defaulting so the result matches what the webhook would persist.
	parsed.SetDefaults(t.Context())
	return parsed
}
// MustParseV1Pipeline takes YAML and parses it into a *v1.Pipeline
func MustParseV1Pipeline(t *testing.T, yaml string) *v1.Pipeline {
	t.Helper()
	parsed := v1.Pipeline{}
	// Prepend the fixed apiVersion/kind header so callers only supply the spec.
	withHeader := `apiVersion: tekton.dev/v1
kind: Pipeline
` + yaml
	mustParseYAML(t, withHeader, &parsed)
	return &parsed
}
// MustParseV1PipelineAndSetDefaults takes YAML and parses it into a *v1.Pipeline and sets defaults
func MustParseV1PipelineAndSetDefaults(t *testing.T, yaml string) *v1.Pipeline {
	t.Helper()
	parsed := MustParseV1Pipeline(t, yaml)
	// Apply API defaulting so the result matches what the webhook would persist.
	parsed.SetDefaults(t.Context())
	return parsed
}
// MustParseVerificationPolicy takes YAML and parses it into a *v1alpha1.VerificationPolicy
func MustParseVerificationPolicy(t *testing.T, yaml string) *v1alpha1.VerificationPolicy {
	t.Helper()
	policy := v1alpha1.VerificationPolicy{}
	// Prepend the fixed apiVersion/kind header so callers only supply the spec.
	withHeader := `apiVersion: tekton.dev/v1alpha1
kind: VerificationPolicy
` + yaml
	mustParseYAML(t, withHeader, &policy)
	return &policy
}
// mustParseYAML decodes the given YAML document into i using the scheme's
// universal deserializer, failing the test immediately on any decode error.
func mustParseYAML(t *testing.T, yaml string, i runtime.Object) {
	t.Helper()
	decoder := scheme.Codecs.UniversalDeserializer()
	if _, _, err := decoder.Decode([]byte(yaml), nil, i); err != nil {
		t.Fatalf("mustParseYAML (%s): %v", yaml, err)
	}
}
// MustParseTaskRunWithObjectMeta parses YAML to *v1.TaskRun and adds objectMeta to it
func MustParseTaskRunWithObjectMeta(t *testing.T, objectMeta metav1.ObjectMeta, asYAML string) *v1.TaskRun {
	t.Helper()
	parsed := MustParseV1TaskRun(t, asYAML)
	// Replace the metadata wholesale with the caller-supplied ObjectMeta.
	parsed.ObjectMeta = objectMeta
	return parsed
}
// MustParseCustomRunWithObjectMeta parses YAML to *v1beta1.CustomRun and adds objectMeta to it
func MustParseCustomRunWithObjectMeta(t *testing.T, objectMeta metav1.ObjectMeta, asYAML string) *v1beta1.CustomRun {
	t.Helper()
	parsed := MustParseCustomRun(t, asYAML)
	// Replace the metadata wholesale with the caller-supplied ObjectMeta.
	parsed.ObjectMeta = objectMeta
	return parsed
}
// MustParseChildPipelineRunWithObjectMeta parses YAML to *v1.PipelineRun and adds objectMeta to it
func MustParseChildPipelineRunWithObjectMeta(t *testing.T, objectMeta metav1.ObjectMeta, asYAML string) *v1.PipelineRun {
	t.Helper()
	parsed := MustParseV1PipelineRun(t, asYAML)
	// Replace the metadata wholesale with the caller-supplied ObjectMeta.
	parsed.ObjectMeta = objectMeta
	return parsed
}
/*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"archive/tar"
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
remoteimg "github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/tarball"
tkremote "github.com/tektoncd/pipeline/pkg/remote/oci"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/yaml"
)
// ObjectAnnotationMapper is a func alias that maps a runtime Object to the Tekton Bundle annotations map.
// Implementations receive each object being bundled and return the OCI layer annotations to attach to it.
type ObjectAnnotationMapper func(object runtime.Object) map[string]string
// DefaultObjectAnnotationMapper does the "right" thing by conforming to the Tekton Bundle spec.
var DefaultObjectAnnotationMapper = func(o runtime.Object) map[string]string {
	gvk := o.GetObjectKind().GroupVersionKind()
	annotations := map[string]string{}
	annotations[tkremote.TitleAnnotation] = GetObjectName(o)
	// The kind annotation is the lowercased, singularized kind (trailing "s" stripped).
	annotations[tkremote.KindAnnotation] = strings.TrimSuffix(strings.ToLower(gvk.Kind), "s")
	annotations[tkremote.APIVersionAnnotation] = gvk.Version
	return annotations
}
// CreateImage will push a new OCI image artifact with the provided raw data object as a layer and return the full image
// reference with a digest to fetch the image. Key must be specified as [lowercase kind]/[object name]. The image ref
// with a digest is returned.
func CreateImage(ref string, objs ...runtime.Object) (string, error) {
	// Delegate to the annotated variant using the spec-conformant default mapper.
	return CreateImageWithAnnotations(ref, DefaultObjectAnnotationMapper, objs...)
}
// CreateImageWithAnnotations is the base form of #CreateImage which accepts an ObjectAnnotationMapper to map an object
// to the annotations for it. Each object is serialized to YAML, wrapped in a single-entry tarball
// layer named after the object, annotated via mapper, and appended to an empty image, which is then
// pushed to ref. The image reference with digest is returned.
func CreateImageWithAnnotations(ref string, mapper ObjectAnnotationMapper, objs ...runtime.Object) (string, error) {
	imgRef, err := name.ParseReference(ref)
	if err != nil {
		// Fixed typo ("undexpected") and added a ":" separator before the wrapped error.
		return "", fmt.Errorf("unexpected error producing image reference: %w", err)
	}
	img := empty.Image
	for _, obj := range objs {
		data, err := yaml.Marshal(obj)
		if err != nil {
			return "", fmt.Errorf("error serializing object: %w", err)
		}
		// Compress the data into a tarball with a single entry named after the object.
		var tarbundle bytes.Buffer
		writer := tar.NewWriter(&tarbundle)
		if err := writer.WriteHeader(&tar.Header{
			Name:     GetObjectName(obj),
			Mode:     0o600,
			Size:     int64(len(data)),
			Typeflag: tar.TypeReg,
		}); err != nil {
			return "", err
		}
		if _, err := writer.Write(data); err != nil {
			return "", err
		}
		if err := writer.Close(); err != nil {
			return "", err
		}
		layer, err := tarball.LayerFromReader(&tarbundle)
		if err != nil {
			return "", fmt.Errorf("unexpected error adding layer to image: %w", err)
		}
		annotations := mapper(obj)
		img, err = mutate.Append(img, mutate.Addendum{
			Layer:       layer,
			Annotations: annotations,
		})
		if err != nil {
			return "", fmt.Errorf("could not add layer to image: %w", err)
		}
	}
	if err := remoteimg.Write(imgRef, img); err != nil {
		// Join with the underlying error instead of discarding it so callers can see the root cause.
		return "", errors.Join(errors.New("could not push example image to registry"), err)
	}
	digest, err := img.Digest()
	if err != nil {
		return "", fmt.Errorf("could not read image digest: %w", err)
	}
	return imgRef.Context().Digest(digest.String()).String(), nil
}
// GetObjectName returns the ObjectMetadata.Name field which every resource should have.
// The value is read via reflection: reflect.Indirect dereferences the (typically pointer)
// object, then the embedded "ObjectMeta" struct's "Name" field is fetched by name.
// NOTE(review): assumes obj embeds a field literally named "ObjectMeta" with a string "Name"
// field; an object without such a field would make the chained FieldByName panic — confirm
// callers only pass metav1-style API objects.
func GetObjectName(obj runtime.Object) string {
return reflect.Indirect(reflect.ValueOf(obj)).FieldByName("ObjectMeta").FieldByName("Name").String()
}
/*
Copyright 2024 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"context"
"errors"
"fmt"
"strings"
"github.com/google/go-cmp/cmp"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
resource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
resolution "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/test/diff"
)
// Compile-time checks that the mocks below satisfy the interfaces they stand in for.
var _ resource.Requester = &Requester{}
var _ resolution.ResolvedResource = &ResolvedResource{}
// NewResolvedResource creates a mock resolved resource that is
// populated with the given data and annotations or returns the given
// error from its Data() method.
func NewResolvedResource(data []byte, annotations map[string]string, source *pipelinev1.RefSource, dataErr error) *ResolvedResource {
	resolved := &ResolvedResource{}
	resolved.ResolvedData = data
	resolved.ResolvedAnnotations = annotations
	resolved.ResolvedRefSource = source
	resolved.DataErr = dataErr
	return resolved
}
// NewRequester creates a mock requester that resolves to the given
// resource or returns the given error on Submit().
func NewRequester(resource resolution.ResolvedResource, err error, resolverPayload resource.ResolverPayload) *Requester {
	requester := &Requester{}
	requester.ResolvedResource = resource
	requester.SubmitErr = err
	requester.ResolverPayload = resolverPayload
	return requester
}
// Requester implements resolution.Requester and makes it easier
// to mock the outcome of a remote pipelineRef or taskRef resolution.
type Requester struct {
// The resolved resource object to return when a request is
// submitted.
ResolvedResource resolution.ResolvedResource
// An error to return when a request is submitted.
SubmitErr error
// ResolverPayload that should match that of the request in order to return the resolved resource.
// Matching is skipped when this is empty, when its ResolutionSpec is nil, or when the
// spec has no params or an empty URL (see Submit).
ResolverPayload resource.ResolverPayload
}
// Submit implements resolution.Requester, accepting the name of a
// resolver and a request for a specific remote file, and then returns
// whatever mock data was provided on initialization.
//
// When a ResolverPayload with params and a URL was configured, the
// incoming request's params and URL must match it; otherwise an error
// describing the mismatch is returned instead of the mock resource.
func (r *Requester) Submit(ctx context.Context, resolverName resolution.ResolverName, req resource.Request) (resolution.ResolvedResource, error) {
	// No expectations configured: hand back the canned result unconditionally.
	if (r.ResolverPayload == resource.ResolverPayload{} || r.ResolverPayload.ResolutionSpec == nil || len(r.ResolverPayload.ResolutionSpec.Params) == 0) {
		return r.ResolvedResource, r.SubmitErr
	}
	// Without an expected URL, param matching is skipped as well.
	if r.ResolverPayload.ResolutionSpec.URL == "" {
		return r.ResolvedResource, r.SubmitErr
	}
	reqParams := make(map[string]pipelinev1.ParamValue)
	for _, p := range req.ResolverPayload().ResolutionSpec.Params {
		reqParams[p.Name] = p.Value
	}
	var wrongParams []string
	for _, p := range r.ResolverPayload.ResolutionSpec.Params {
		if reqValue, ok := reqParams[p.Name]; !ok {
			wrongParams = append(wrongParams, fmt.Sprintf("expected %s param to be %#v, but was %#v", p.Name, p.Value, reqValue))
		} else if d := cmp.Diff(p.Value, reqValue); d != "" {
			wrongParams = append(wrongParams, fmt.Sprintf("%s param did not match: %s", p.Name, diff.PrintWantGot(d)))
		}
	}
	if len(wrongParams) > 0 {
		return nil, errors.New(strings.Join(wrongParams, "; "))
	}
	if r.ResolverPayload.ResolutionSpec.URL != req.ResolverPayload().ResolutionSpec.URL {
		// The comparison is on the spec URL; the previous message misleadingly said "name"
		// and used a capitalized error string.
		return nil, fmt.Errorf("resolution URL did not match: got %s; want %s", req.ResolverPayload().ResolutionSpec.URL, r.ResolverPayload.ResolutionSpec.URL)
	}
	return r.ResolvedResource, r.SubmitErr
}
// ResolvedResource implements resolution.ResolvedResource and makes
// it easier to mock the resolved content of a fetched pipeline or task.
type ResolvedResource struct {
// The resolved bytes to return when resolution is complete.
ResolvedData []byte
// An error to return instead of the resolved bytes after
// resolution completes. Data() returns both fields as-is.
DataErr error
// Annotations to return when resolution is complete.
ResolvedAnnotations map[string]string
// ResolvedRefSource to return the source reference of the remote data
ResolvedRefSource *pipelinev1.RefSource
}
// Data implements resolution.ResolvedResource and returns the mock
// data and/or error given to it on initialization.
func (r *ResolvedResource) Data() ([]byte, error) {
	data, err := r.ResolvedData, r.DataErr
	return data, err
}
// Annotations implements resolution.ResolvedResource and returns
// the mock annotations given to it on initialization.
func (r *ResolvedResource) Annotations() map[string]string {
	annotations := r.ResolvedAnnotations
	return annotations
}
// RefSource returns the source reference of the remote data that records where the
// remote file came from including the url, digest and the entrypoint.
func (r *ResolvedResource) RefSource() *pipelinev1.RefSource {
	source := r.ResolvedRefSource
	return source
}
// RawRequest stores the raw request data
type RawRequest struct {
// ResolverPayload is echoed back verbatim by the Request() wrapper's ResolverPayload().
ResolverPayload resource.ResolverPayload
}
// Request returns a Request interface based on the RawRequest.
func (r *RawRequest) Request() resource.Request {
	// Tolerate a nil receiver by substituting an empty raw request.
	raw := r
	if raw == nil {
		raw = &RawRequest{}
	}
	return &Request{RawRequest: *raw}
}
// Request implements resolution.Request and makes it easier to mock input for submit
// Using inline structs is to avoid conflicts between field names and method names.
type Request struct {
RawRequest
}
// Compile-time check that Request satisfies resource.Request.
var _ resource.Request = &Request{}
// NewRequest creates a mock request populated with the given resolver payload.
func NewRequest(resolverPayload resource.ResolverPayload) *Request {
	raw := RawRequest{ResolverPayload: resolverPayload}
	return &Request{RawRequest: raw}
}
// ResolverPayload implements resolution.Request and returns the mock payload
// given to it on initialization. (The comment previously referred to "Params",
// and a duplicate `var _ resource.Request = &Request{}` assertion — already
// present next to the Request declaration — has been removed.)
func (r *Request) ResolverPayload() resource.ResolverPayload {
	return r.RawRequest.ResolverPayload
}
/*
Copyright 2023 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"context"
"errors"
"fmt"
"strings"
"github.com/google/go-cmp/cmp"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
common "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/test/diff"
)
// Compile-time checks that the mocks below satisfy the resolution interfaces.
var _ common.Requester = &Requester{}
var _ common.ResolvedResource = &ResolvedResource{}
// NewRequester creates a mock requester that resolves to the given
// resource or returns the given error on Submit().
func NewRequester(resource common.ResolvedResource, err error) *Requester {
	requester := &Requester{}
	requester.ResolvedResource = resource
	requester.SubmitErr = err
	return requester
}
// NewResolvedResource creates a mock resolved resource that is
// populated with the given data and annotations or returns the given
// error from its Data() method.
func NewResolvedResource(data []byte, annotations map[string]string, source *pipelinev1.RefSource, dataErr error) *ResolvedResource {
	resolved := &ResolvedResource{}
	resolved.ResolvedData = data
	resolved.ResolvedAnnotations = annotations
	resolved.ResolvedRefSource = source
	resolved.DataErr = dataErr
	return resolved
}
// Requester implements resolution.Requester and makes it easier
// to mock the outcome of a remote pipelineRef or taskRef resolution.
type Requester struct {
// The resolved resource object to return when a request is
// submitted.
ResolvedResource common.ResolvedResource
// An error to return when a request is submitted.
SubmitErr error
// Params that should match those on the request in order to return the resolved resource.
// When empty, Submit skips param matching entirely.
Params []pipelinev1.Param
}
// Submit implements resolution.Requester, accepting the name of a
// resolver and a request for a specific remote file, and then returns
// whatever mock data was provided on initialization. When expected
// Params were configured, the request's params must match them first.
func (r *Requester) Submit(ctx context.Context, resolverName common.ResolverName, req common.Request) (common.ResolvedResource, error) {
	// Without configured expectations, always return the canned result.
	if len(r.Params) == 0 {
		return r.ResolvedResource, r.SubmitErr
	}
	got := map[string]pipelinev1.ParamValue{}
	for _, param := range req.Params() {
		got[param.Name] = param.Value
	}
	mismatches := []string{}
	for _, want := range r.Params {
		actual, found := got[want.Name]
		if !found {
			mismatches = append(mismatches, fmt.Sprintf("expected %s param to be %#v, but was %#v", want.Name, want.Value, actual))
			continue
		}
		if d := cmp.Diff(want.Value, actual); d != "" {
			mismatches = append(mismatches, fmt.Sprintf("%s param did not match: %s", want.Name, diff.PrintWantGot(d)))
		}
	}
	if len(mismatches) != 0 {
		return nil, errors.New(strings.Join(mismatches, "; "))
	}
	return r.ResolvedResource, r.SubmitErr
}
// ResolvedResource implements resolution.ResolvedResource and makes
// it easier to mock the resolved content of a fetched pipeline or task.
type ResolvedResource struct {
// The resolved bytes to return when resolution is complete.
ResolvedData []byte
// An error to return instead of the resolved bytes after
// resolution completes. Data() returns both fields as-is.
DataErr error
// Annotations to return when resolution is complete.
ResolvedAnnotations map[string]string
// ResolvedRefSource to return the source reference of the remote data
ResolvedRefSource *pipelinev1.RefSource
}
// Data implements resolution.ResolvedResource and returns the mock
// data and/or error given to it on initialization.
func (r *ResolvedResource) Data() ([]byte, error) {
	data, err := r.ResolvedData, r.DataErr
	return data, err
}
// Annotations implements resolution.ResolvedResource and returns
// the mock annotations given to it on initialization.
func (r *ResolvedResource) Annotations() map[string]string {
	annotations := r.ResolvedAnnotations
	return annotations
}
// RefSource returns the source reference of the remote data that records where the
// remote file came from including the url, digest and the entrypoint.
func (r *ResolvedResource) RefSource() *pipelinev1.RefSource {
	source := r.ResolvedRefSource
	return source
}
// RawRequest stores the raw request data
// mirrored back by the Request() wrapper's Name/Namespace/Params accessors.
type RawRequest struct {
// the request name
Name string
// the request namespace
Namespace string
// the params for the request
Params []pipelinev1.Param
}
// Request returns a Request interface based on the RawRequest.
func (r *RawRequest) Request() common.Request {
	// Tolerate a nil receiver by substituting an empty raw request.
	raw := r
	if raw == nil {
		raw = &RawRequest{}
	}
	return &Request{RawRequest: *raw}
}
// Request implements resolution.Request and makes it easier to mock input for submit
// Using inline structs is to avoid conflicts between field names and method names.
type Request struct {
RawRequest
}
// Compile-time check that Request satisfies common.Request.
var _ common.Request = &Request{}
// NewRequest creates a mock request that is populated with the given name namespace and params
func NewRequest(name, namespace string, params []pipelinev1.Param) *Request {
	raw := RawRequest{
		Name:      name,
		Namespace: namespace,
		Params:    params,
	}
	return &Request{RawRequest: raw}
}
// Name implements resolution.Request and returns the mock name given to it on initialization.
func (r *Request) Name() string {
	name := r.RawRequest.Name
	return name
}
// Namespace implements resolution.Request and returns the mock namespace given to it on initialization.
func (r *Request) Namespace() string {
	namespace := r.RawRequest.Namespace
	return namespace
}
// Params implements resolution.Request and returns the mock params given to it on initialization.
func (r *Request) Params() pipelinev1.Params {
	params := r.RawRequest.Params
	return params
}
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"os"
"path/filepath"
"testing"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fakek8s "k8s.io/client-go/kubernetes/fake"
"knative.dev/pkg/logging"
)
// TODO(#5820): refactor those into an internal pkg
const (
// namespace is the fixed namespace used for the test policies and secrets below.
namespace = "trusted-resources"
// signatureAnnotation is the key of signature in annotation map
signatureAnnotation = "tekton.dev/signature"
)
// read is an indirection over readPasswordFn — presumably so tests can stub
// password prompting; confirm against callers of getPass.
var read = readPasswordFn
// SetupTrustedResourceConfig configures the trusted-resources-verification-no-match-policy feature flag with the given mode for testing
func SetupTrustedResourceConfig(ctx context.Context, verificationNoMatchPolicy string) context.Context {
	cfgStore := config.NewStore(logging.FromContext(ctx).Named("config-store"))
	flags := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      "feature-flags",
		},
		Data: map[string]string{
			"trusted-resources-verification-no-match-policy": verificationNoMatchPolicy,
		},
	}
	// Feed the ConfigMap through the store and embed the resulting config in the context.
	cfgStore.OnConfigChanged(flags)
	return cfgStore.ToContext(ctx)
}
// SetupVerificationPolicies set verification policies and secrets to store public keys.
// This function helps to setup 4 kinds of VerificationPolicies:
// 1. One public key in inline data
// 2. One public key in secret
// 3. the policy pattern doesn't match any resources
// 4. warn mode policy without keys
// SignerVerifier is returned to sign resources
// The k8s clientset is returned to fetch secret from it.
// VerificationPolicies are returned to fetch public keys
func SetupVerificationPolicies(t *testing.T) (signature.SignerVerifier, *ecdsa.PrivateKey, *fakek8s.Clientset, []*v1alpha1.VerificationPolicy) {
t.Helper()
// Primary key pair: its public key is used both inline and via a secret below.
sv, keys, pub, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
if err != nil {
t.Fatalf("failed to generate keys %v", err)
}
// Second key pair: only its public key is used, for the deliberately non-matching policy.
_, _, pub2, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
if err != nil {
t.Fatalf("failed to generate keys %v", err)
}
// Secret holding the primary public key, referenced by the secret-based policies.
secret := &corev1.Secret{
Data: map[string][]byte{"cosign.pub": pub},
ObjectMeta: metav1.ObjectMeta{
Name: "verification-secrets",
Namespace: namespace,
},
}
// Policy 1: public key carried inline in the policy data (enforce mode).
keyInDataVp := getVerificationPolicy(
"keyInDataVp",
namespace,
[]v1alpha1.ResourcePattern{
{Pattern: "https://github.com/tektoncd/catalog.git"},
},
[]v1alpha1.Authority{
{
Name: "pubkey",
Key: &v1alpha1.KeyRef{
Data: string(pub),
HashAlgorithm: "sha256",
},
},
}, v1alpha1.ModeEnforce)
// Policy 2: public key fetched from the secret created above (enforce mode).
keyInSecretVp := getVerificationPolicy(
"keyInSecretVp",
namespace,
[]v1alpha1.ResourcePattern{
{
Pattern: "gcr.io/tekton-releases/catalog/upstream/git-clone",
},
},
[]v1alpha1.Authority{
{
Name: "pubkey",
Key: &v1alpha1.KeyRef{
SecretRef: &corev1.SecretReference{
Name: secret.Name,
Namespace: secret.Namespace,
},
HashAlgorithm: "sha256",
},
},
}, v1alpha1.ModeEnforce)
// Policy 3: mismatched key and a pattern that matches no resources (enforce mode).
wrongKeyandPatternVp := getVerificationPolicy(
"wrongKeyInDataVp",
namespace,
[]v1alpha1.ResourcePattern{
{Pattern: "this should not match any resources"},
},
[]v1alpha1.Authority{
{
Name: "pubkey",
Key: &v1alpha1.KeyRef{
Data: string(pub2),
HashAlgorithm: "sha256",
},
},
}, v1alpha1.ModeEnforce)
// Policy 4: warn-mode policy keyed from the secret.
warnModeVP := getVerificationPolicy(
"warnModeVP",
namespace,
[]v1alpha1.ResourcePattern{
{
Pattern: "warnVP",
},
},
[]v1alpha1.Authority{
{
Name: "pubkey",
Key: &v1alpha1.KeyRef{
SecretRef: &corev1.SecretReference{
Name: secret.Name,
Namespace: secret.Namespace,
},
HashAlgorithm: "sha256",
},
},
}, v1alpha1.ModeWarn)
k8sclient := fakek8s.NewSimpleClientset(secret)
return sv, keys, k8sclient, []*v1alpha1.VerificationPolicy{&keyInDataVp, &keyInSecretVp, &wrongKeyandPatternVp, &warnModeVP}
}
// SetupMatchAllVerificationPolicies set verification policies with a Pattern to match all resources
// SignerVerifier is returned to sign resources
// The k8s clientset is returned to fetch secret from it.
// VerificationPolicies are returned to fetch public keys
func SetupMatchAllVerificationPolicies(t *testing.T, namespace string) (signature.SignerVerifier, *fakek8s.Clientset, []*v1alpha1.VerificationPolicy) {
	t.Helper()
	signer, _, pubBytes, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
	if err != nil {
		t.Fatalf("failed to generate keys %v", err)
	}
	// Store the public key in a secret so policies could reference it by name.
	secret := &corev1.Secret{
		Data: map[string][]byte{"cosign.pub": pubBytes},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "verification-secrets",
			Namespace: namespace,
		},
	}
	// A single enforce-mode policy whose ".*" pattern matches every resource.
	policy := getVerificationPolicy(
		"matchAllVp",
		namespace,
		[]v1alpha1.ResourcePattern{{Pattern: ".*"}},
		[]v1alpha1.Authority{{
			Name: "pubkey",
			Key: &v1alpha1.KeyRef{
				Data:          string(pubBytes),
				HashAlgorithm: "sha256",
			},
		}},
		v1alpha1.ModeEnforce)
	return signer, fakek8s.NewSimpleClientset(secret), []*v1alpha1.VerificationPolicy{&policy}
}
// GetSignerFromFile generates key files to tmpdir, return signer and pubkey path
func GetSignerFromFile(ctx context.Context, t *testing.T) (signature.Signer, string) {
	t.Helper()
	signer, _, pubBytes, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
	if err != nil {
		t.Fatal(err)
	}
	// Write the PEM-encoded public key into a temp dir owned by the test.
	pubPath := filepath.Join(t.TempDir(), "ecdsa.pub")
	if err := os.WriteFile(pubPath, pubBytes, 0o600); err != nil {
		t.Fatal(err)
	}
	return signer, pubPath
}
// GetKeysFromFile generates key files to tmpdir, return keys and pubkey path
func GetKeysFromFile(ctx context.Context, t *testing.T) (*ecdsa.PrivateKey, string) {
	t.Helper()
	_, private, pubBytes, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
	if err != nil {
		t.Fatal(err)
	}
	// Write the PEM-encoded public key into a temp dir owned by the test.
	pubPath := filepath.Join(t.TempDir(), "ecdsa.pub")
	if err := os.WriteFile(pubPath, pubBytes, 0o600); err != nil {
		t.Fatal(err)
	}
	return private, pubPath
}
// GenerateKeys creates public key files, return the SignerVerifier
func GenerateKeys(c elliptic.Curve, hashFunc crypto.Hash) (signature.SignerVerifier, *ecdsa.PrivateKey, []byte, error) {
	private, err := ecdsa.GenerateKey(c, rand.Reader)
	if err != nil {
		return nil, nil, nil, err
	}
	// PEM-encode the public half for distribution alongside the signer.
	pemBytes, err := cryptoutils.MarshalPublicKeyToPEM(private.Public())
	if err != nil {
		return nil, nil, nil, err
	}
	signerVerifier, err := signature.LoadSignerVerifier(private, hashFunc)
	if err != nil {
		return nil, nil, nil, err
	}
	return signerVerifier, private, pemBytes, nil
}
// signInterface returns the encoded signature for the given object.
func signInterface(signer signature.Signer, i interface{}) ([]byte, error) {
	if signer == nil {
		return nil, errors.New("signer is nil")
	}
	encoded, err := json.Marshal(i)
	if err != nil {
		return nil, err
	}
	// Sign the SHA-256 digest of the JSON encoding.
	digest := sha256.Sum256(encoded)
	return signer.SignMessage(bytes.NewReader(digest[:]))
}
// GetSignedV1beta1Pipeline signed the given pipeline and rename it with given name
func GetSignedV1beta1Pipeline(unsigned *v1beta1.Pipeline, signer signature.Signer, name string) (*v1beta1.Pipeline, error) {
	signed := unsigned.DeepCopy()
	signed.Name = name
	if signed.Annotations == nil {
		signed.Annotations = make(map[string]string)
	}
	// Sign the renamed copy and record the base64 signature in its annotations.
	sig, err := signInterface(signer, signed)
	if err != nil {
		return nil, err
	}
	signed.Annotations[signatureAnnotation] = base64.StdEncoding.EncodeToString(sig)
	return signed, nil
}
// GetSignedV1beta1Task signed the given task and rename it with given name
func GetSignedV1beta1Task(unsigned *v1beta1.Task, signer signature.Signer, name string) (*v1beta1.Task, error) {
	signed := unsigned.DeepCopy()
	signed.Name = name
	if signed.Annotations == nil {
		signed.Annotations = make(map[string]string)
	}
	// Sign the renamed copy and record the base64 signature in its annotations.
	sig, err := signInterface(signer, signed)
	if err != nil {
		return nil, err
	}
	signed.Annotations[signatureAnnotation] = base64.StdEncoding.EncodeToString(sig)
	return signed, nil
}
// GetSignedV1Pipeline signed the given pipeline and rename it with given name
func GetSignedV1Pipeline(unsigned *v1.Pipeline, signer signature.Signer, name string) (*v1.Pipeline, error) {
	signed := unsigned.DeepCopy()
	signed.Name = name
	if signed.Annotations == nil {
		signed.Annotations = make(map[string]string)
	}
	// Sign the renamed copy and record the base64 signature in its annotations.
	sig, err := signInterface(signer, signed)
	if err != nil {
		return nil, err
	}
	signed.Annotations[signatureAnnotation] = base64.StdEncoding.EncodeToString(sig)
	return signed, nil
}
// GetSignedV1Task signed the given task and rename it with given name
func GetSignedV1Task(unsigned *v1.Task, signer signature.Signer, name string) (*v1.Task, error) {
	signed := unsigned.DeepCopy()
	signed.Name = name
	if signed.Annotations == nil {
		signed.Annotations = make(map[string]string)
	}
	// Sign the renamed copy and record the base64 signature in its annotations.
	sig, err := signInterface(signer, signed)
	if err != nil {
		return nil, err
	}
	signed.Annotations[signatureAnnotation] = base64.StdEncoding.EncodeToString(sig)
	return signed, nil
}
// getPass obtains the signing password via the package-level read hook.
func getPass(confirm bool) ([]byte, error) {
	// Use a distinct local name to avoid shadowing the package-level `read` var.
	pwFn := read(confirm)
	return pwFn()
}
// readPasswordFn returns a func that yields the password from the
// PRIVATE_PASSWORD environment variable, or a func that always errors
// when the variable is unset. The confirm flag is currently unused.
func readPasswordFn(confirm bool) func() ([]byte, error) {
	if pw, ok := os.LookupEnv("PRIVATE_PASSWORD"); ok {
		return func() ([]byte, error) {
			return []byte(pw), nil
		}
	}
	return func() ([]byte, error) {
		return nil, errors.New("fail to get password")
	}
}
// getVerificationPolicy assembles a VerificationPolicy with the given
// name/namespace, resource patterns, authorities and enforcement mode.
func getVerificationPolicy(name, namespace string, patterns []v1alpha1.ResourcePattern, authorities []v1alpha1.Authority, mode v1alpha1.ModeType) v1alpha1.VerificationPolicy {
	policy := v1alpha1.VerificationPolicy{}
	policy.TypeMeta = metav1.TypeMeta{
		Kind:       "VerificationPolicy",
		APIVersion: "v1alpha1",
	}
	policy.ObjectMeta = metav1.ObjectMeta{
		Name:      name,
		Namespace: namespace,
	}
	policy.Spec = v1alpha1.VerificationPolicySpec{
		Resources:   patterns,
		Authorities: authorities,
		Mode:        mode,
	}
	return policy
}
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Poll Pipeline resources
After creating Pipeline resources or making changes to them, you will need to
wait for the system to realize those changes. You can use polling methods to
check the resources reach the desired state.
The WaitFor* functions use the kubernetes
wait package (https://godoc.org/k8s.io/apimachinery/pkg/util/wait). To poll
they use
PollImmediate (https://godoc.org/k8s.io/apimachinery/pkg/util/wait#PollImmediate)
and the return values of the function you provide behave the same as
ConditionFunc (https://godoc.org/k8s.io/apimachinery/pkg/util/wait#ConditionFunc):
a boolean to indicate if the function should stop or continue polling, and an
error to indicate if there has been an error.
For example, you can poll a TaskRun object to wait for it to have a Status.Condition:
err = WaitForTaskRunState(c, hwTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) {
if len(tr.Status.Conditions) > 0 {
return true, nil
}
return false, nil
}, "TaskRunHasCondition")
*/
package test
import (
"context"
"fmt"
"strings"
"time"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"go.opencensus.io/trace"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"knative.dev/pkg/apis"
)
const (
// interval is the polling period; timeout bounds the total wait in the Wait* helpers.
interval = 1 * time.Second
timeout = 15 * time.Minute
// API version selectors accepted by WaitForTaskRunState and friends.
v1Version = "v1"
v1beta1Version = "v1beta1"
)
// ConditionAccessorFn is a condition function used by polling functions
type ConditionAccessorFn func(ca apis.ConditionAccessor) (bool, error)
// pollImmediateWithContext repeatedly invokes fn every interval until it
// reports done, returns an error, the timeout elapses, or ctx is cancelled.
func pollImmediateWithContext(ctx context.Context, fn func() (bool, error)) error {
	return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(context.Context) (bool, error) {
		// Surface caller cancellation immediately instead of invoking fn again.
		if err := ctx.Err(); err != nil {
			return true, err
		}
		return fn()
	})
}
// WaitForTaskRunState polls the status of the TaskRun called name from client every
// interval until inState returns `true` indicating it is done, returns an
// error or timeout. desc will be used to name the metric that is emitted to
// track how long it took for name to get into the state checked by inState.
// version will be used to determine the client to be applied for the wait.
func WaitForTaskRunState(ctx context.Context, c *clients, name string, inState ConditionAccessorFn, desc, version string) error {
	_, span := trace.StartSpan(ctx, fmt.Sprintf("WaitForTaskRunState/%s/%s", name, desc))
	defer span.End()
	return pollImmediateWithContext(ctx, func() (bool, error) {
		if version == v1Version {
			tr, err := c.V1TaskRunClient.Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return true, err
			}
			return inState(&tr.Status)
		}
		// Any other version string falls back to the v1beta1 client.
		tr, err := c.V1beta1TaskRunClient.Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return true, err
		}
		return inState(&tr.Status)
	})
}
// WaitForDeploymentState polls the status of the Deployment called name in
// namespace from client every interval until inState returns `true`
// indicating it is done, returns an error or timeout. desc will be used to
// name the metric that is emitted to track how long it took for name to get
// into the state checked by inState.
func WaitForDeploymentState(ctx context.Context, c *clients, name string, namespace string, inState func(d *appsv1.Deployment) (bool, error), desc string) error {
	spanName := fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc)
	_, span := trace.StartSpan(ctx, spanName)
	defer span.End()
	return pollImmediateWithContext(ctx, func() (bool, error) {
		dep, err := c.KubeClient.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Lookup failures end the poll immediately.
			return true, err
		}
		return inState(dep)
	})
}
// WaitForPodState polls the status of the Pod called name in namespace from
// client every interval until inState returns `true` indicating it is done,
// returns an error or timeout. desc will be used to name the metric that is
// emitted to track how long it took for name to get into the state checked
// by inState.
func WaitForPodState(ctx context.Context, c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error {
	spanName := fmt.Sprintf("WaitForPodState/%s/%s", name, desc)
	_, span := trace.StartSpan(ctx, spanName)
	defer span.End()
	return pollImmediateWithContext(ctx, func() (bool, error) {
		pod, err := c.KubeClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Lookup failures end the poll immediately.
			return true, err
		}
		return inState(pod)
	})
}
// WaitForPVCIsDeleted polls the PVCs in namespace from client every interval
// until the namespace contains no PVCs, then returns. It returns an error on
// lookup failure or when polltimeout elapses. desc will be used to name the
// metric that is emitted to track how long the deletion took.
func WaitForPVCIsDeleted(ctx context.Context, c *clients, polltimeout time.Duration, name, namespace, desc string) error {
	spanName := fmt.Sprintf("WaitForPVCIsDeleted/%s/%s", name, desc)
	_, span := trace.StartSpan(ctx, spanName)
	defer span.End()
	// Bound the poll by the caller-supplied timeout rather than the package default.
	ctx, cancel := context.WithTimeout(ctx, polltimeout)
	defer cancel()
	return pollImmediateWithContext(ctx, func() (bool, error) {
		pvcs, err := c.KubeClient.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{})
		if err != nil {
			return true, err
		}
		// Done once the namespace has no PVCs left.
		return len(pvcs.Items) == 0, nil
	})
}
// WaitForPipelineRunState polls the status of the PipelineRun called name from client every
// interval until inState returns `true` indicating it is done, returns an
// error or timeout. desc will be used to name the metric that is emitted to
// track how long it took for name to get into the state checked by inState.
// version will be used to determine the client to be applied for the wait:
// v1Version selects the v1 client; any other value falls back to v1beta1.
func WaitForPipelineRunState(ctx context.Context, c *clients, name string, polltimeout time.Duration, inState ConditionAccessorFn, desc, version string) error {
	metricName := fmt.Sprintf("WaitForPipelineRunState/%s/%s", name, desc)
	_, span := trace.StartSpan(ctx, metricName)
	defer span.End()
	// Bound the poll by the caller-supplied timeout rather than the package default.
	ctx, cancel := context.WithTimeout(ctx, polltimeout)
	defer cancel()
	return pollImmediateWithContext(ctx, func() (bool, error) {
		switch version {
		// Use the shared v1Version constant, consistent with WaitForTaskRunState,
		// instead of a magic "v1" literal.
		case v1Version:
			r, err := c.V1PipelineRunClient.Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				// Lookup failures end the poll immediately.
				return true, err
			}
			return inState(&r.Status)
		default:
			r, err := c.V1beta1PipelineRunClient.Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return true, err
			}
			return inState(&r.Status)
		}
	})
}
// WaitForServiceExternalIPState polls the status of the k8s Service called
// name in namespace from client every interval until inState returns `true`
// indicating an external IP has been assigned, returns an error or timeout.
// desc will be used to name the metric that is emitted to track how long it
// took for name to get into the state checked by inState.
func WaitForServiceExternalIPState(ctx context.Context, c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error {
	spanName := fmt.Sprintf("WaitForServiceExternalIPState/%s/%s", name, desc)
	_, span := trace.StartSpan(ctx, spanName)
	defer span.End()
	return pollImmediateWithContext(ctx, func() (bool, error) {
		svc, err := c.KubeClient.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Lookup failures end the poll immediately.
			return true, err
		}
		return inState(svc)
	})
}
// Succeed provides a poll condition function that checks if the
// ConditionAccessor resource has successfully completed or not: done with no
// error on ConditionTrue, done with an error on ConditionFalse, keep polling
// otherwise.
func Succeed(name string) ConditionAccessorFn {
	return func(ca apis.ConditionAccessor) (bool, error) {
		cond := ca.GetCondition(apis.ConditionSucceeded)
		if cond == nil {
			// No Succeeded condition yet; keep polling.
			return false, nil
		}
		switch cond.Status {
		case corev1.ConditionTrue:
			return true, nil
		case corev1.ConditionFalse:
			return true, fmt.Errorf("%q failed", name)
		}
		return false, nil
	}
}
// Failed provides a poll condition function that checks if the
// ConditionAccessor resource has failed or not: done with no error on
// ConditionFalse, done with an error on ConditionTrue (unexpected success),
// keep polling otherwise.
func Failed(name string) ConditionAccessorFn {
	return func(ca apis.ConditionAccessor) (bool, error) {
		cond := ca.GetCondition(apis.ConditionSucceeded)
		if cond == nil {
			// No Succeeded condition yet; keep polling.
			return false, nil
		}
		switch cond.Status {
		case corev1.ConditionTrue:
			return true, fmt.Errorf("%q succeeded", name)
		case corev1.ConditionFalse:
			return true, nil
		}
		return false, nil
	}
}
// FailedWithReason provides a poll condition function that checks if the
// ConditionAccessor resource has failed with the given reason. Failing with
// any other reason, or succeeding, ends the poll with an error.
func FailedWithReason(reason, name string) ConditionAccessorFn {
	return func(ca apis.ConditionAccessor) (bool, error) {
		cond := ca.GetCondition(apis.ConditionSucceeded)
		if cond == nil {
			// No Succeeded condition yet; keep polling.
			return false, nil
		}
		switch cond.Status {
		case corev1.ConditionFalse:
			if cond.Reason == reason {
				return true, nil
			}
			return true, fmt.Errorf("%q completed with the wrong reason: %s (message: %s)", name, cond.Reason, cond.Message)
		case corev1.ConditionTrue:
			return true, fmt.Errorf("%q completed successfully, should have been failed with reason %q", name, reason)
		}
		return false, nil
	}
}
// FailedWithMessage provides a poll condition function that checks if the
// ConditionAccessor resource has failed with a message containing the given
// substring. Failing with any other message, or succeeding, ends the poll
// with an error.
func FailedWithMessage(message, name string) ConditionAccessorFn {
	return func(ca apis.ConditionAccessor) (bool, error) {
		cond := ca.GetCondition(apis.ConditionSucceeded)
		if cond == nil {
			// No Succeeded condition yet; keep polling.
			return false, nil
		}
		switch cond.Status {
		case corev1.ConditionFalse:
			if strings.Contains(cond.Message, message) {
				return true, nil
			}
			return true, fmt.Errorf("%q completed with the wrong message: %s", name, cond.Message)
		case corev1.ConditionTrue:
			return true, fmt.Errorf("%q completed successfully, should have been failed with message %q", name, message)
		}
		return false, nil
	}
}
// Running provides a poll condition function that checks if the
// ConditionAccessor resource is currently running: done with no error once
// the Succeeded condition is Unknown with reason "Running" or "Pending",
// done with an error if the resource already finished (True or False).
func Running(name string) ConditionAccessorFn {
	return func(ca apis.ConditionAccessor) (bool, error) {
		cond := ca.GetCondition(apis.ConditionSucceeded)
		if cond == nil {
			// No Succeeded condition yet; keep polling.
			return false, nil
		}
		switch {
		case cond.Status == corev1.ConditionTrue || cond.Status == corev1.ConditionFalse:
			return true, fmt.Errorf(`%q already finished`, name)
		case cond.Status == corev1.ConditionUnknown && (cond.Reason == "Running" || cond.Reason == "Pending"):
			return true, nil
		}
		return false, nil
	}
}
// TaskRunSucceed provides a poll condition function that checks if the TaskRun
// has successfully completed. It is an alias for Succeed.
func TaskRunSucceed(name string) ConditionAccessorFn {
	return Succeed(name)
}
// TaskRunFailed provides a poll condition function that checks if the TaskRun
// has failed. It is an alias for Failed.
func TaskRunFailed(name string) ConditionAccessorFn {
	return Failed(name)
}
// PipelineRunSucceed provides a poll condition function that checks if the PipelineRun
// has successfully completed. It is an alias for Succeed.
func PipelineRunSucceed(name string) ConditionAccessorFn {
	return Succeed(name)
}
// PipelineRunFailed provides a poll condition function that checks if the PipelineRun
// has failed. It is an alias for Failed.
func PipelineRunFailed(name string) ConditionAccessorFn {
	return Failed(name)
}
// PipelineRunPending provides a poll condition function that checks if the PipelineRun
// has been marked pending by the Tekton controller. It stops polling with an
// error if the run has already started or finished, since it can no longer
// become pending.
func PipelineRunPending(name string) ConditionAccessorFn {
	running := Running(name)
	return func(ca apis.ConditionAccessor) (bool, error) {
		c := ca.GetCondition(apis.ConditionSucceeded)
		if c != nil {
			// Done as soon as the Succeeded condition is Unknown with the Pending reason.
			if c.Status == corev1.ConditionUnknown && c.Reason == string(v1beta1.PipelineRunReasonPending) {
				return true, nil
			}
		}
		// Not pending: defer to Running. If Running reports done (the run is
		// running or already finished), return an error — note that Running's
		// own error, if any, is intentionally replaced by the one below.
		status, err := running(ca)
		if status {
			reason := ""
			// c _should_ never be nil if we get here, but we have this check just in case.
			if c != nil {
				reason = c.Reason
			}
			return false, fmt.Errorf("status should be %s, but it is %s", v1beta1.PipelineRunReasonPending, reason)
		}
		return status, err
	}
}
// Chain allows multiple ConditionAccessorFns to be chained together, checking
// the condition of each in order. The first function that errors or reports
// not-done short-circuits the chain; only when every function reports done
// does the chained function report done.
func Chain(fns ...ConditionAccessorFn) ConditionAccessorFn {
	return func(ca apis.ConditionAccessor) (bool, error) {
		for _, check := range fns {
			if done, err := check(ca); err != nil || !done {
				return done, err
			}
		}
		return true, nil
	}
}